# Licensed under a 3-clause BSD style license - see PYFITS.rst
import ctypes
import gc
import itertools
import math
import re
import time
import warnings
from contextlib import suppress
import numpy as np
from .base import DELAYED, ExtensionHDU, BITPIX2DTYPE, DTYPE2BITPIX
from .image import ImageHDU
from .table import BinTableHDU
from astropy.io.fits import conf
from astropy.io.fits.card import Card
from astropy.io.fits.column import Column, ColDefs, TDEF_RE
from astropy.io.fits.column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES
from astropy.io.fits.fitsrec import FITS_rec
from astropy.io.fits.header import Header
from astropy.io.fits.util import (_is_pseudo_integer, _pseudo_zero, _is_int,
_get_array_mmap)
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
try:
from astropy.io.fits import compression
COMPRESSION_SUPPORTED = COMPRESSION_ENABLED = True
except ImportError:
COMPRESSION_SUPPORTED = COMPRESSION_ENABLED = False
# Quantization dithering method constants; these are right out of fitsio.h
NO_DITHER = -1
SUBTRACTIVE_DITHER_1 = 1
SUBTRACTIVE_DITHER_2 = 2
QUANTIZE_METHOD_NAMES = {
NO_DITHER: 'NO_DITHER',
SUBTRACTIVE_DITHER_1: 'SUBTRACTIVE_DITHER_1',
SUBTRACTIVE_DITHER_2: 'SUBTRACTIVE_DITHER_2'
}
DITHER_SEED_CLOCK = 0
DITHER_SEED_CHECKSUM = -1
COMPRESSION_TYPES = ('RICE_1', 'GZIP_1', 'GZIP_2', 'PLIO_1', 'HCOMPRESS_1')
# Default compression parameter values
DEFAULT_COMPRESSION_TYPE = 'RICE_1'
DEFAULT_QUANTIZE_LEVEL = 16.
DEFAULT_QUANTIZE_METHOD = NO_DITHER
DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK
DEFAULT_HCOMP_SCALE = 0
DEFAULT_HCOMP_SMOOTH = 0
DEFAULT_BLOCK_SIZE = 32
DEFAULT_BYTE_PIX = 4
CMTYPE_ALIASES = {'RICE_ONE': 'RICE_1'}
COMPRESSION_KEYWORDS = {'ZIMAGE', 'ZCMPTYPE', 'ZBITPIX', 'ZNAXIS', 'ZMASKCMP',
'ZSIMPLE', 'ZTENSION', 'ZEXTEND'}
class CompImageHeader(Header):
"""
Header object for compressed image HDUs designed to keep the compression
header and the underlying image header properly synchronized.
This essentially wraps the image header, so that all values are read from
and written to the image header. However, updates to the image header will
also update the table header where appropriate.
Note that if no image header is passed in, the code will instantiate a
regular `~astropy.io.fits.Header`.
"""
# TODO: The difficulty of implementing this screams a need to rewrite this
# module
_keyword_remaps = {
'SIMPLE': 'ZSIMPLE', 'XTENSION': 'ZTENSION', 'BITPIX': 'ZBITPIX',
'NAXIS': 'ZNAXIS', 'EXTEND': 'ZEXTEND', 'BLOCKED': 'ZBLOCKED',
'PCOUNT': 'ZPCOUNT', 'GCOUNT': 'ZGCOUNT', 'CHECKSUM': 'ZHECKSUM',
'DATASUM': 'ZDATASUM'
}
_zdef_re = re.compile(r'(?P<label>^[Zz][a-zA-Z]*)(?P<num>[1-9][0-9 ]*$)?')
_compression_keywords = set(_keyword_remaps.values()).union(
['ZIMAGE', 'ZCMPTYPE', 'ZMASKCMP', 'ZQUANTIZ', 'ZDITHER0'])
_indexed_compression_keywords = {'ZNAXIS', 'ZTILE', 'ZNAME', 'ZVAL'}
# TODO: Once in place it should be possible to manage some of this through
# the schema system, but it's not quite ready for that yet. Also it still
# makes more sense to change CompImageHDU to subclass ImageHDU :/
def __new__(cls, table_header, image_header=None):
# 2019-09-14 (MHvK): No point wrapping anything if no image_header is
# given. This happens if __getitem__ and copy are called - our super
# class will aim to initialize a new, possibly partially filled
# header, but we cannot usefully deal with that.
# TODO: the above suggests strongly we should *not* subclass from
# Header. See also comment above about the need for reorganization.
if image_header is None:
return Header(table_header)
else:
return super().__new__(cls)
def __init__(self, table_header, image_header):
self._cards = image_header._cards
self._keyword_indices = image_header._keyword_indices
self._rvkc_indices = image_header._rvkc_indices
self._modified = image_header._modified
self._table_header = table_header
# We need to override any Header methods that can modify the header, and
# ensure that they sync with the underlying _table_header
def __setitem__(self, key, value):
# This isn't pretty, but if the `key` is either an int or a tuple we
# need to figure out what keyword name that maps to before doing
# anything else; these checks will be repeated later in the
# super().__setitem__ call but I don't see another way around it
# without some major refactoring
if self._set_slice(key, value, self):
return
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
# We don't want to specify an index otherwise, because that will
# break the behavior for new keywords and for commentary keywords
keyword, index = key, None
if self._is_reserved_keyword(keyword):
return
super().__setitem__(key, value)
if index is not None:
remapped_keyword = self._remap_keyword(keyword)
self._table_header[remapped_keyword, index] = value
# Else this will pass through to ._update
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# If given a slice pass that on to the superclass and bail out
# early; we only want to make updates to _table_header when given
# a key specifying a single keyword
return super().__delitem__(key)
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
keyword, index = key, None
if key not in self:
raise KeyError(f"Keyword {key!r} not found.")
super().__delitem__(key)
remapped_keyword = self._remap_keyword(keyword)
if remapped_keyword in self._table_header:
if index is not None:
del self._table_header[(remapped_keyword, index)]
else:
del self._table_header[remapped_keyword]
def append(self, card=None, useblanks=True, bottom=False, end=False):
# This logic unfortunately needs to be duplicated from the base class
# in order to determine the keyword
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
'The value appended to a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
if self._is_reserved_keyword(card.keyword):
return
super().append(card=card, useblanks=useblanks, bottom=bottom, end=end)
remapped_keyword = self._remap_keyword(card.keyword)
# card.keyword strips the HIERARCH if present so this must be added
# back to avoid a warning.
if str(card).startswith("HIERARCH ") and not remapped_keyword.startswith("HIERARCH "):
remapped_keyword = "HIERARCH " + remapped_keyword
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.append may have already deleted a blank card in the table
# header, thanks to inheritance: Header.append calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__delitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.append(card=card, useblanks=False,
bottom=bottom, end=end)
def insert(self, key, card, useblanks=True, after=False):
if isinstance(key, int):
# Determine condition to pass through to append
if after:
if key == -1:
key = len(self._cards)
else:
key += 1
if key >= len(self._cards):
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
'The value inserted into a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
if self._is_reserved_keyword(card.keyword):
return
# Now the tricky part is to determine where to insert in the table
# header. If given a numerical index we need to map that to the
# corresponding index in the table header. Although rare, there may be
# cases where there is no mapping in which case we just try the same
# index
# NOTE: It is crucial that remapped_index in particular is figured out
# before the image header is modified
remapped_index = self._remap_index(key)
remapped_keyword = self._remap_keyword(card.keyword)
super().insert(key, card, useblanks=useblanks, after=after)
card = Card(remapped_keyword, card.value, card.comment)
# Here we disable the use of blank cards, because the call above to
# Header.insert may have already deleted a blank card in the table
# header, thanks to inheritance: Header.insert calls 'del self[-1]'
# to delete a blank card, which calls CompImageHeader.__delitem__,
# which deletes the blank card both in the image and the table headers!
self._table_header.insert(remapped_index, card, useblanks=False,
after=after)
def _update(self, card):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
super()._update(card)
if keyword in Card._commentary_keywords:
# Otherwise this will result in a duplicate insertion
return
remapped_keyword = self._remap_keyword(keyword)
self._table_header._update((remapped_keyword,) + card[1:])
# Last piece needed (I think) for synchronizing with the real header
# This one is tricky since _relativeinsert calls insert
def _relativeinsert(self, card, before=None, after=None, replace=False):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
# Now we have to figure out how to remap 'before' and 'after'
if before is None:
if isinstance(after, int):
remapped_after = self._remap_index(after)
else:
remapped_after = self._remap_keyword(after)
remapped_before = None
else:
if isinstance(before, int):
remapped_before = self._remap_index(before)
else:
remapped_before = self._remap_keyword(before)
remapped_after = None
super()._relativeinsert(card, before=before, after=after,
replace=replace)
remapped_keyword = self._remap_keyword(keyword)
card = Card(remapped_keyword, card[1], card[2])
self._table_header._relativeinsert(card, before=remapped_before,
after=remapped_after,
replace=replace)
@classmethod
def _is_reserved_keyword(cls, keyword, warn=True):
msg = ('Keyword {!r} is reserved for use by the FITS Tiled Image '
'Convention and will not be stored in the header for the '
'image being compressed.'.format(keyword))
if keyword == 'TFIELDS':
if warn:
warnings.warn(msg)
return True
m = TDEF_RE.match(keyword)
if m and m.group('label').upper() in TABLE_KEYWORD_NAMES:
if warn:
warnings.warn(msg)
return True
m = cls._zdef_re.match(keyword)
if m:
label = m.group('label').upper()
num = m.group('num')
if num is not None and label in cls._indexed_compression_keywords:
if warn:
warnings.warn(msg)
return True
elif label in cls._compression_keywords:
if warn:
warnings.warn(msg)
return True
return False
@classmethod
def _remap_keyword(cls, keyword):
# Given a keyword that one might set on an image, remap that keyword to
# the name used for it in the COMPRESSED HDU header
# This is mostly just a lookup in _keyword_remaps, but needs handling
# for NAXISn keywords
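# e.g. 'NAXIS2' -> 'ZNAXIS2', 'BITPIX' -> 'ZBITPIX'; keywords with no
# mapping are returned unchanged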
is_naxisn = False
if keyword[:5] == 'NAXIS':
with suppress(ValueError):
index = int(keyword[5:])
is_naxisn = index > 0
if is_naxisn:
return f'ZNAXIS{index}'
# If the keyword does not need to be remapped then just return the
# original keyword
return cls._keyword_remaps.get(keyword, keyword)
def _remap_index(self, idx):
# Given an integer index into this header, map that to the index in the
# table header for the same card. If the card doesn't exist in the
# table header (generally should *not* be the case) this will just
# return the same index
# This *does* also accept a keyword or (keyword, repeat) tuple and
# obtains the associated numerical index with self._cardindex
if not isinstance(idx, int):
idx = self._cardindex(idx)
keyword, repeat = self._keyword_from_index(idx)
remapped_insert_keyword = self._remap_keyword(keyword)
with suppress(IndexError, KeyError):
idx = self._table_header._cardindex((remapped_insert_keyword,
repeat))
return idx
def clear(self):
"""
Remove all cards from the header.
"""
self._table_header.clear()
super().clear()
# TODO: Fix this class so that it doesn't actually inherit from BinTableHDU,
# but instead has an internal BinTableHDU reference
class CompImageHDU(BinTableHDU):
"""
Compressed Image HDU class.
"""
_manages_own_heap = True
"""
The calls to CFITSIO lay out the heap data in memory, and we write it out
the same way CFITSIO organizes it. In principle this would break if a user
manually changes the underlying compressed data by hand, but there is no
reason they would want to do that (and if they do that's their
responsibility).
"""
_default_name = "COMPRESSED_IMAGE"
def __init__(self, data=None, header=None, name=None,
compression_type=DEFAULT_COMPRESSION_TYPE,
tile_size=None,
hcomp_scale=DEFAULT_HCOMP_SCALE,
hcomp_smooth=DEFAULT_HCOMP_SMOOTH,
quantize_level=DEFAULT_QUANTIZE_LEVEL,
quantize_method=DEFAULT_QUANTIZE_METHOD,
dither_seed=DEFAULT_DITHER_SEED,
do_not_scale_image_data=False,
uint=False, scale_back=False, **kwargs):
"""
Parameters
----------
data : array, optional
Uncompressed image data
header : `~astropy.io.fits.Header`, optional
Header to be associated with the image; when reading the HDU from a
file (data=DELAYED), the header read from the file
name : str, optional
The ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name ``COMPRESSED_IMAGE`` is
used.
compression_type : str, optional
Compression algorithm: one of
``'RICE_1'``, ``'RICE_ONE'``, ``'PLIO_1'``, ``'GZIP_1'``,
``'GZIP_2'``, ``'HCOMPRESS_1'``
tile_size : sequence of int, optional
Compression tile sizes. Default treats each row of image as a
tile.
hcomp_scale : float, optional
HCOMPRESS scale parameter
hcomp_smooth : float, optional
HCOMPRESS smooth parameter
quantize_level : float, optional
Floating point quantization level; see note below
quantize_method : int, optional
Floating point quantization dithering method; can be either
``NO_DITHER`` (-1; default), ``SUBTRACTIVE_DITHER_1`` (1), or
``SUBTRACTIVE_DITHER_2`` (2); see note below
dither_seed : int, optional
Random seed to use for dithering; can be either an integer in the
range 1 to 10000 (inclusive), ``DITHER_SEED_CLOCK`` (0; default), or
``DITHER_SEED_CHECKSUM`` (-1); see note below
Notes
-----
The astropy.io.fits package supports 2 methods of image compression:
1) The entire FITS file may be externally compressed with the gzip
or pkzip utility programs, producing a ``*.gz`` or ``*.zip``
file, respectively. When reading compressed files of this type,
Astropy first uncompresses the entire file into a temporary file
before performing the requested read operations. The
astropy.io.fits package does not support writing to these types
of compressed files. This type of compression is supported in
the ``_File`` class, not in the `CompImageHDU` class. The file
compression type is recognized by the ``.gz`` or ``.zip`` file
name extension.
2) The `CompImageHDU` class supports the FITS tiled image
compression convention in which the image is subdivided into a
grid of rectangular tiles, and each tile of pixels is
individually compressed. The details of this FITS compression
convention are described at the `FITS Support Office web site
<https://fits.gsfc.nasa.gov/registry/tilecompression.html>`_.
Basically, the compressed image tiles are stored in rows of a
variable length array column in a FITS binary table. The
astropy.io.fits package recognizes that this binary table extension
contains an image and treats it as if it were an image
extension. Under this tile-compression format, FITS header
keywords remain uncompressed. At this time, Astropy does not
support the ability to extract and uncompress sections of the
image without having to uncompress the entire image.
The astropy.io.fits package supports 3 general-purpose compression
algorithms plus one other special-purpose compression technique that is
designed for data masks with positive integer pixel values. The 3
general purpose algorithms are GZIP, Rice, and HCOMPRESS, and the
special-purpose technique is the IRAF pixel list compression technique
(PLIO). The ``compression_type`` parameter defines the compression
algorithm to be used.
The FITS image can be subdivided into any desired rectangular grid of
compression tiles. With the GZIP, Rice, and PLIO algorithms, the
default is to take each row of the image as a tile. The HCOMPRESS
algorithm is inherently 2-dimensional in nature, so the default in this
case is to take 16 rows of the image per tile. In most cases, it makes
little difference what tiling pattern is used, so the default tiles are
usually adequate. In the case of very small images, it could be more
efficient to compress the whole image as a single tile. Note that the
image dimensions are not required to be an integer multiple of the tile
dimensions; if not, then the tiles at the edges of the image will be
smaller than the other tiles. The ``tile_size`` parameter may be
provided as a list of tile sizes, one for each dimension in the image.
For example a ``tile_size`` value of ``[100,100]`` would divide a 300 X
300 image into 9 100 X 100 tiles.
The 4 supported image compression algorithms are all 'lossless' when
applied to integer FITS images; the pixel values are preserved exactly
with no loss of information during the compression and uncompression
process. In addition, the HCOMPRESS algorithm supports a 'lossy'
compression mode that will produce a larger amount of image compression.
This is achieved by specifying a non-zero value for the ``hcomp_scale``
parameter. Since the amount of compression that is achieved depends
directly on the RMS noise in the image, it is usually more convenient
to specify the ``hcomp_scale`` factor relative to the RMS noise.
Setting ``hcomp_scale = 2.5`` means use a scale factor that is 2.5
times the calculated RMS noise in the image tile. In some cases it may
be desirable to specify the exact scaling to be used, instead of
specifying it relative to the calculated noise value. This may be done
by specifying the negative of the desired scale value (typically in the
range -2 to -100).
Very high compression factors (of 100 or more) can be achieved by using
large ``hcomp_scale`` values, however, this can produce undesirable
'blocky' artifacts in the compressed image. A variation of the
HCOMPRESS algorithm (called HSCOMPRESS) can be used in this case to
apply a small amount of smoothing of the image when it is uncompressed
to help cover up these artifacts. This smoothing is purely cosmetic
and does not cause any significant change to the image pixel values.
Setting the ``hcomp_smooth`` parameter to 1 will engage the smoothing
algorithm.
Floating point FITS images (which have ``BITPIX`` = -32 or -64) usually
contain too much 'noise' in the least significant bits of the mantissa
of the pixel values to be effectively compressed with any lossless
algorithm. Consequently, floating point images are first quantized
into scaled integer pixel values (and thus throwing away much of the
noise) before being compressed with the specified algorithm (either
GZIP, RICE, or HCOMPRESS). This technique produces much higher
compression factors than simply using the GZIP utility to externally
compress the whole FITS file, but it also means that the original
floating point pixel values are not exactly preserved. When done
properly, this integer scaling technique will only discard the
insignificant noise while still preserving all the real information in
the image. The amount of precision that is retained in the pixel
values is controlled by the ``quantize_level`` parameter. Larger
values will result in compressed images whose pixels more closely match
the floating point pixel values, but at the same time the amount of
compression that is achieved will be reduced. Users should experiment
with different values for this parameter to determine the optimal value
that preserves all the useful information in the image, without
needlessly preserving all the 'noise' which will hurt the compression
efficiency.
The default value for the ``quantize_level`` scale factor is 16, which
means that scaled integer pixel values will be quantized such that the
difference between adjacent integer values will be 1/16th of the noise
level in the image background. An optimized algorithm is used to
accurately estimate the noise in the image. As an example, if the RMS
noise in the background pixels of an image = 32.0, then the spacing
between adjacent scaled integer pixel values will equal 2.0 by default.
Note that the RMS noise is independently calculated for each tile of
the image, so the resulting integer scaling factor may fluctuate
slightly for each tile. In some cases, it may be desirable to specify
the exact quantization level to be used, instead of specifying it
relative to the calculated noise value. This may be done by specifying
the negative of desired quantization level for the value of
``quantize_level``. In the previous example, one could specify
``quantize_level = -2.0`` so that the quantized integer levels differ
by 2.0. Larger negative values for ``quantize_level`` mean that the
levels are more coarsely-spaced, and will produce higher compression
factors.
The quantization algorithm can also apply one of two random dithering
methods in order to reduce bias in the measured intensity of background
regions. The default method, specified with the constant
``SUBTRACTIVE_DITHER_1``, adds dithering to the zero-point of the
quantization array itself rather than adding noise to the actual image.
The random noise is added on a pixel-by-pixel basis, so in order to
restore each pixel from its integer value to its floating point value
it is necessary to replay the same sequence of random numbers for each
pixel (see below). The other method, ``SUBTRACTIVE_DITHER_2``, is
exactly like the first except that before dithering any pixel with a
floating point value of ``0.0`` is replaced with the special integer
value ``-2147483647``. When the image is uncompressed, pixels with
this value are restored back to ``0.0`` exactly. Finally, a value of
``NO_DITHER`` disables dithering entirely.
As mentioned above, when using the subtractive dithering algorithm it
is necessary to be able to generate a (pseudo-)random sequence of noise
for each pixel, and replay that same sequence upon decompressing. To
facilitate this, a random seed between 1 and 10000 (inclusive) is used
to seed a random number generator, and that seed is stored in the
``ZDITHER0`` keyword in the header of the compressed HDU. In order to
use that seed to generate the same sequence of random numbers the same
random number generator must be used at compression and decompression
time; for that reason the tiled image convention provides an
implementation of a very simple pseudo-random number generator. The
seed itself can be provided in one of three ways, controllable by the
``dither_seed`` argument: It may be specified manually, or it may be
generated arbitrarily based on the system's clock
(``DITHER_SEED_CLOCK``) or based on a checksum of the pixels in the
image's first tile (``DITHER_SEED_CHECKSUM``). The clock-based method
is the default, and is sufficient to ensure that the value is
reasonably "arbitrary" and that the same seed is unlikely to be
generated sequentially. The checksum method, on the other hand,
ensures that the same seed is used every time for a specific image.
This is particularly useful for software testing as it ensures that the
same image will always use the same seed.
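Examples
--------
A minimal usage sketch; the array contents and the output file name
below are only illustrative::

    import numpy as np
    from astropy.io import fits

    data = np.arange(100, dtype=np.float32).reshape(10, 10)
    hdu = fits.CompImageHDU(data, compression_type='RICE_1',
                            tile_size=[10, 10], quantize_level=16.0)
    hdu.writeto('compressed_example.fits', overwrite=True)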
"""
if not COMPRESSION_SUPPORTED:
# TODO: Raise a more specific Exception type
raise Exception('The astropy.io.fits.compression module is not '
'available. Creation of compressed image HDUs is '
'disabled.')
compression_type = CMTYPE_ALIASES.get(compression_type, compression_type)
if data is DELAYED:
# Reading the HDU from a file
super().__init__(data=data, header=header)
else:
# Create at least a skeleton HDU that matches the input
# header and data (if any were input)
super().__init__(data=None, header=header)
# Store the input image data
self.data = data
# Update the table header (_header) to the compressed
# image format and to match the input data (if any);
# Create the image header (_image_header) from the input
# image header (if any) and ensure it matches the input
# data; Create the initially empty table data array to
# hold the compressed data.
self._update_header_data(header, name,
compression_type=compression_type,
tile_size=tile_size,
hcomp_scale=hcomp_scale,
hcomp_smooth=hcomp_smooth,
quantize_level=quantize_level,
quantize_method=quantize_method,
dither_seed=dither_seed)
# TODO: A lot of this should be passed on to an internal image HDU or
# something like that, see ticket #88
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
self._axes = [self._header.get('ZNAXIS' + str(axis + 1), 0)
for axis in range(self._header.get('ZNAXIS', 0))]
# store any scale factors from the table header
if do_not_scale_image_data:
self._bzero = 0
self._bscale = 1
else:
self._bzero = self._header.get('BZERO', 0)
self._bscale = self._header.get('BSCALE', 1)
self._bitpix = self._header['ZBITPIX']
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_bitpix = self._bitpix
def _remove_unnecessary_default_extnames(self, header):
"""Remove default EXTNAME values if they are unnecessary.
Some data files (e.g. from CFHT) can have the default EXTNAME and
an explicit value. This method removes the default if a more
specific header exists. It also removes any duplicate default
values.
"""
if 'EXTNAME' in header:
indices = header._keyword_indices['EXTNAME']
# Only continue if there is more than one found
n_extname = len(indices)
if n_extname > 1:
extnames_to_remove = [index for index in indices
if header[index] == self._default_name]
if len(extnames_to_remove) == n_extname:
# Keep the first (they are all the same)
extnames_to_remove.pop(0)
# Remove them all in reverse order to keep the index unchanged.
for index in reversed(sorted(extnames_to_remove)):
del header[index]
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
# Similar to base class but uses .header rather than ._header
return str(self.header.get('EXTNAME', self._default_name))
@name.setter
def name(self, value):
# This is a copy of the base class but using .header instead
# of ._header to ensure that the name stays in sync.
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if 'EXTNAME' in self.header:
self.header['EXTNAME'] = value
else:
self.header['EXTNAME'] = (value, 'extension name')
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != 'XTENSION':
return False
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
if xtension not in ('BINTABLE', 'A3DTABLE'):
return False
if 'ZIMAGE' not in header or not header['ZIMAGE']:
return False
if COMPRESSION_SUPPORTED and COMPRESSION_ENABLED:
return True
elif not COMPRESSION_SUPPORTED:
warnings.warn('Failure matching header to a compressed image '
'HDU: The compression module is not available.\n'
'The HDU will be treated as a Binary Table HDU.',
AstropyUserWarning)
return False
else:
# Compression is supported but disabled; just pass silently (#92)
return False
def _update_header_data(self, image_header,
name=None,
compression_type=None,
tile_size=None,
hcomp_scale=None,
hcomp_smooth=None,
quantize_level=None,
quantize_method=None,
dither_seed=None):
"""
Update the table header (`_header`) to the compressed
image format and to match the input data (if any). Create
the image header (`_image_header`) from the input image
header (if any) and ensure it matches the input
data. Create the initially-empty table data array to hold
the compressed data.
This method is mainly called internally, but a user may wish to
call this method after assigning new data to the `CompImageHDU`
object that is of a different type.
Parameters
----------
image_header : `~astropy.io.fits.Header`
header to be associated with the image
name : str, optional
the ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name 'COMPRESSED_IMAGE' is used
compression_type : str, optional
compression algorithm 'RICE_1', 'PLIO_1', 'GZIP_1', 'GZIP_2',
'HCOMPRESS_1'; if this value is `None`, use value already in the
header; if no value already in the header, use 'RICE_1'
tile_size : sequence of int, optional
compression tile sizes as a list; if this value is `None`, use
value already in the header; if no value already in the header,
treat each row of image as a tile
hcomp_scale : float, optional
HCOMPRESS scale parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 0
hcomp_smooth : float, optional
HCOMPRESS smooth parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 0
quantize_level : float, optional
floating point quantization level; if this value is `None`, use the
value already in the header; if no value already in header, use 16
quantize_method : int, optional
floating point quantization dithering method; can be either
NO_DITHER (-1; default), SUBTRACTIVE_DITHER_1 (1), or
SUBTRACTIVE_DITHER_2 (2)
dither_seed : int, optional
random seed to use for dithering; can be either an integer in the
range 1 to 10000 (inclusive), DITHER_SEED_CLOCK (0; default), or
DITHER_SEED_CHECKSUM (-1)
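Examples
--------
A sketch of the re-synchronization described above; ``hdu`` and
``new_data`` are illustrative names for an existing `CompImageHDU`
and an array of a different type::

    hdu.data = new_data
    hdu._update_header_data(hdu.header)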
"""
# Clean up EXTNAME duplicates
self._remove_unnecessary_default_extnames(self._header)
image_hdu = ImageHDU(data=self.data, header=self._header)
self._image_header = CompImageHeader(self._header, image_hdu.header)
self._axes = image_hdu._axes
del image_hdu
# Determine based on the size of the input data whether to use the Q
# column format to store compressed data or the P format.
# The Q format is used only if the uncompressed data is larger than
# 4 GB. This is not a perfect heuristic, as one can contrive an input
# array for which, once compressed, the entire binary table representing
# the compressed data is larger than 4 GB. That said, this is the same
# heuristic used by CFITSIO, so this should give consistent results.
# And the cases where this heuristic is insufficient are extreme and
# almost entirely contrived corner cases, so it will do for now
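# (2 ** 32 bytes == 4 GiB; for example, a 2048 x 2048 float64 image is
# only ~32 MiB, well below the threshold, so it uses the 32-bit 'P'
# descriptor columns rather than the 64-bit 'Q' descriptors)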
if self._has_data:
huge_hdu = self.data.nbytes > 2 ** 32
else:
huge_hdu = False
# Update the extension name in the table header
if not name and 'EXTNAME' not in self._header:
# Do not sync this with the image header since the default
# name is specific to the table header.
self._header.set('EXTNAME', self._default_name,
'name of this binary table extension',
after='TFIELDS')
elif name:
# Force the name into table and image headers.
self.name = name
# Set the compression type in the table header.
if compression_type:
if compression_type not in COMPRESSION_TYPES:
warnings.warn(
'Unknown compression type provided (supported are {}). '
'Default ({}) compression will be used.'
.format(', '.join(map(repr, COMPRESSION_TYPES)),
DEFAULT_COMPRESSION_TYPE),
AstropyUserWarning)
compression_type = DEFAULT_COMPRESSION_TYPE
self._header.set('ZCMPTYPE', compression_type,
'compression algorithm', after='TFIELDS')
else:
compression_type = self._header.get('ZCMPTYPE',
DEFAULT_COMPRESSION_TYPE)
compression_type = CMTYPE_ALIASES.get(compression_type,
compression_type)
# If the input image header had BSCALE/BZERO cards, then insert
# them in the table header.
if image_header:
bzero = image_header.get('BZERO', 0.0)
bscale = image_header.get('BSCALE', 1.0)
after_keyword = 'EXTNAME'
if bscale != 1.0:
self._header.set('BSCALE', bscale, after=after_keyword)
after_keyword = 'BSCALE'
if bzero != 0.0:
self._header.set('BZERO', bzero, after=after_keyword)
try:
bitpix_comment = image_header.comments['BITPIX']
except (AttributeError, KeyError):
bitpix_comment = 'data type of original image'
try:
naxis_comment = image_header.comments['NAXIS']
except (AttributeError, KeyError):
naxis_comment = 'dimension of original image'
# Set the label for the first column in the table
self._header.set('TTYPE1', 'COMPRESSED_DATA', 'label for field 1',
after='TFIELDS')
# Set the data format for the first column. It is dependent
# on the requested compression type.
if compression_type == 'PLIO_1':
tform1 = '1QI' if huge_hdu else '1PI'
else:
tform1 = '1QB' if huge_hdu else '1PB'
self._header.set('TFORM1', tform1,
'data format of field: variable length array',
after='TTYPE1')
# Create the first column for the table. This column holds the
# compressed data.
col1 = Column(name=self._header['TTYPE1'], format=tform1)
# Create the additional columns required for floating point
# data and calculate the width of the output table.
zbitpix = self._image_header['BITPIX']
if zbitpix < 0 and quantize_level != 0.0:
# floating point image has 'COMPRESSED_DATA',
# 'UNCOMPRESSED_DATA', 'ZSCALE', and 'ZZERO' columns (unless using
# lossless compression, per CFITSIO)
ncols = 4
# CFITSIO 3.28 and up automatically use the GZIP_COMPRESSED_DATA
# column to store floating point data that couldn't be quantized, instead
# of the UNCOMPRESSED_DATA column. There's no way to control
# this behavior so the only way to determine which behavior will
# be employed is via the CFITSIO version
ttype2 = 'GZIP_COMPRESSED_DATA'
# The required format for the GZIP_COMPRESSED_DATA is actually
# missing from the standard docs, but CFITSIO suggests it
# should be 1PB, which is logical.
tform2 = '1QB' if huge_hdu else '1PB'
# Set up the second column for the table that will hold any
# uncompressible data.
self._header.set('TTYPE2', ttype2, 'label for field 2',
after='TFORM1')
self._header.set('TFORM2', tform2,
'data format of field: variable length array',
after='TTYPE2')
col2 = Column(name=ttype2, format=tform2)
# Set up the third column for the table that will hold
# the scale values for quantized data.
self._header.set('TTYPE3', 'ZSCALE', 'label for field 3',
after='TFORM2')
self._header.set('TFORM3', '1D',
'data format of field: 8-byte DOUBLE',
after='TTYPE3')
col3 = Column(name=self._header['TTYPE3'],
format=self._header['TFORM3'])
# Set up the fourth column for the table that will hold
# the zero values for the quantized data.
self._header.set('TTYPE4', 'ZZERO', 'label for field 4',
after='TFORM3')
self._header.set('TFORM4', '1D',
'data format of field: 8-byte DOUBLE',
after='TTYPE4')
after = 'TFORM4'
col4 = Column(name=self._header['TTYPE4'],
format=self._header['TFORM4'])
# Create the ColDefs object for the table
cols = ColDefs([col1, col2, col3, col4])
else:
# default table has just one 'COMPRESSED_DATA' column
ncols = 1
after = 'TFORM1'
# remove any header cards for the additional columns that
# may be left over from the previous data
to_remove = ['TTYPE2', 'TFORM2', 'TTYPE3', 'TFORM3', 'TTYPE4',
'TFORM4']
for k in to_remove:
try:
del self._header[k]
except KeyError:
pass
# Create the ColDefs object for the table
cols = ColDefs([col1])
# Update the table header with the width of the table, the
# number of fields in the table, the indicator for a compressed
# image HDU, the data type of the image data and the number of
# dimensions in the image data array.
self._header.set('NAXIS1', cols.dtype.itemsize,
'width of table in bytes')
self._header.set('TFIELDS', ncols, 'number of fields in each row',
after='GCOUNT')
self._header.set('ZIMAGE', True, 'extension contains compressed image',
after=after)
self._header.set('ZBITPIX', zbitpix,
bitpix_comment, after='ZIMAGE')
self._header.set('ZNAXIS', self._image_header['NAXIS'], naxis_comment,
after='ZBITPIX')
# Strip the table header of all the ZNAXISn and ZTILEn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
try:
del self._header['ZNAXIS' + str(idx)]
del self._header['ZTILE' + str(idx)]
except KeyError:
break
# Verify that any input tile size parameter is the appropriate
# size to match the HDU's data.
naxis = self._image_header['NAXIS']
if not tile_size:
tile_size = []
elif len(tile_size) != naxis:
warnings.warn('Provided tile size not appropriate for the data. '
'Default tile size will be used.', AstropyUserWarning)
tile_size = []
# Set default tile dimensions for HCOMPRESS_1
if compression_type == 'HCOMPRESS_1':
if (self._image_header['NAXIS1'] < 4 or
self._image_header['NAXIS2'] < 4):
raise ValueError('Hcompress minimum image dimension is '
'4 pixels')
elif tile_size:
if tile_size[0] < 4 or tile_size[1] < 4:
# user specified tile size is too small
raise ValueError('Hcompress minimum tile dimension is '
'4 pixels')
major_dims = len([ts for ts in tile_size if ts > 1])
if major_dims > 2:
raise ValueError(
'HCOMPRESS can only support 2-dimensional tile sizes. '
'All but two of the tile_size dimensions must be set '
'to 1.')
if tile_size and (tile_size[0] == 0 and tile_size[1] == 0):
# compress the whole image as a single tile
tile_size[0] = self._image_header['NAXIS1']
tile_size[1] = self._image_header['NAXIS2']
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_size[i] = 1
elif not tile_size:
# The Hcompress algorithm is inherently 2D in nature, so the
# row by row tiling that is used for other compression
# algorithms is not appropriate. If the image has less than 30
# rows, then the entire image will be compressed as a single
# tile. Otherwise the tiles will consist of 16 rows of the
# image. This keeps the tiles to a reasonable size, and it
# also includes enough rows to allow good compression
# efficiency. If the last tile of the image happens to contain
# less than 4 rows, then find another tile size with between 14
# and 30 rows (preferably even), so that the last tile has at
# least 4 rows.
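# e.g. NAXIS2 = 50: 50 % 16 == 2 and 50 % 24 == 2 (both < 4), but
# 50 % 20 == 10, so a tile height of 20 is chosen below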
# 1st tile dimension is the row length of the image
tile_size.append(self._image_header['NAXIS1'])
if self._image_header['NAXIS2'] <= 30:
tile_size.append(self._image_header['NAXIS2'])
else:
# look for another good tile dimension
naxis2 = self._image_header['NAXIS2']
for dim in [16, 24, 20, 30, 28, 26, 22, 18, 14]:
if naxis2 % dim == 0 or naxis2 % dim > 3:
tile_size.append(dim)
break
else:
tile_size.append(17)
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_size.append(1)
# check if requested tile size causes the last tile to have
# less than 4 pixels
remain = self._image_header['NAXIS1'] % tile_size[0] # 1st dimen
if remain > 0 and remain < 4:
tile_size[0] += 1 # try increasing tile size by 1
remain = self._image_header['NAXIS1'] % tile_size[0]
if remain > 0 and remain < 4:
raise ValueError('Last tile along 1st dimension has '
'less than 4 pixels')
remain = self._image_header['NAXIS2'] % tile_size[1] # 2nd dimen
if remain > 0 and remain < 4:
tile_size[1] += 1 # try increasing tile size by 1
remain = self._image_header['NAXIS2'] % tile_size[1]
if remain > 0 and remain < 4:
raise ValueError('Last tile along 2nd dimension has '
'less than 4 pixels')
# Set up locations for writing the next cards in the header.
last_znaxis = 'ZNAXIS'
if self._image_header['NAXIS'] > 0:
after1 = 'ZNAXIS1'
else:
after1 = 'ZNAXIS'
# Calculate the number of rows in the output table and
# write the ZNAXISn and ZTILEn cards to the table header.
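# Each row of the binary table holds one compressed tile, so nrows is
# the product over all image axes of ceil(axis_length / tile_length).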
nrows = 0
for idx, axis in enumerate(self._axes):
naxis = 'NAXIS' + str(idx + 1)
znaxis = 'ZNAXIS' + str(idx + 1)
ztile = 'ZTILE' + str(idx + 1)
if tile_size and len(tile_size) >= idx + 1:
ts = tile_size[idx]
else:
if ztile not in self._header:
# Default tile size
if not idx:
ts = self._image_header['NAXIS1']
else:
ts = 1
else:
ts = self._header[ztile]
tile_size.append(ts)
if not nrows:
nrows = (axis - 1) // ts + 1
else:
nrows *= ((axis - 1) // ts + 1)
if image_header and naxis in image_header:
self._header.set(znaxis, axis, image_header.comments[naxis],
after=last_znaxis)
else:
self._header.set(znaxis, axis,
'length of original image axis',
after=last_znaxis)
self._header.set(ztile, ts, 'size of tiles to be compressed',
after=after1)
last_znaxis = znaxis
after1 = ztile
# Set the NAXIS2 header card in the table hdu to the number of
# rows in the table.
self._header.set('NAXIS2', nrows, 'number of rows in table')
self.columns = cols
# Set the compression parameters in the table header.
# First, setup the values to be used for the compression parameters
# in case none were passed in. This will be either the value
# already in the table header for that parameter or the default
# value.
for idx in itertools.count(1):
zname = 'ZNAME' + str(idx)
if zname not in self._header:
break
zval = 'ZVAL' + str(idx)
if self._header[zname] == 'NOISEBIT':
if quantize_level is None:
quantize_level = self._header[zval]
if self._header[zname] == 'SCALE':
if hcomp_scale is None:
hcomp_scale = self._header[zval]
if self._header[zname] == 'SMOOTH':
if hcomp_smooth is None:
hcomp_smooth = self._header[zval]
if quantize_level is None:
quantize_level = DEFAULT_QUANTIZE_LEVEL
if hcomp_scale is None:
hcomp_scale = DEFAULT_HCOMP_SCALE
if hcomp_smooth is None:
hcomp_smooth = DEFAULT_HCOMP_SMOOTH
# Next, strip the table header of all the ZNAMEn and ZVALn keywords
# that may be left over from the previous data
for idx in itertools.count(1):
zname = 'ZNAME' + str(idx)
if zname not in self._header:
break
zval = 'ZVAL' + str(idx)
del self._header[zname]
del self._header[zval]
# Finally, put the appropriate keywords back based on the
# compression type.
after_keyword = 'ZCMPTYPE'
idx = 1
if compression_type == 'RICE_1':
self._header.set('ZNAME1', 'BLOCKSIZE', 'compression block size',
after=after_keyword)
self._header.set('ZVAL1', DEFAULT_BLOCK_SIZE, 'pixels per block',
after='ZNAME1')
self._header.set('ZNAME2', 'BYTEPIX',
'bytes per pixel (1, 2, 4, or 8)', after='ZVAL1')
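# Choose ZVAL2 (BYTEPIX) from ZBITPIX: 8 -> 1 byte, 16 -> 2 bytes,
# anything else falls back to the 4-byte default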
if self._header['ZBITPIX'] == 8:
bytepix = 1
elif self._header['ZBITPIX'] == 16:
bytepix = 2
else:
bytepix = DEFAULT_BYTE_PIX
self._header.set('ZVAL2', bytepix,
'bytes per pixel (1, 2, 4, or 8)',
after='ZNAME2')
after_keyword = 'ZVAL2'
idx = 3
elif compression_type == 'HCOMPRESS_1':
self._header.set('ZNAME1', 'SCALE', 'HCOMPRESS scale factor',
after=after_keyword)
self._header.set('ZVAL1', hcomp_scale, 'HCOMPRESS scale factor',
after='ZNAME1')
self._header.set('ZNAME2', 'SMOOTH', 'HCOMPRESS smooth option',
after='ZVAL1')
self._header.set('ZVAL2', hcomp_smooth, 'HCOMPRESS smooth option',
after='ZNAME2')
after_keyword = 'ZVAL2'
idx = 3
if self._image_header['BITPIX'] < 0: # floating point image
self._header.set('ZNAME' + str(idx), 'NOISEBIT',
'floating point quantization level',
after=after_keyword)
self._header.set('ZVAL' + str(idx), quantize_level,
'floating point quantization level',
after='ZNAME' + str(idx))
# Add the dither method and seed
if quantize_method:
if quantize_method not in [NO_DITHER, SUBTRACTIVE_DITHER_1,
SUBTRACTIVE_DITHER_2]:
name = QUANTIZE_METHOD_NAMES[DEFAULT_QUANTIZE_METHOD]
warnings.warn('Unknown quantization method provided. '
'Default method ({}) used.'.format(name))
quantize_method = DEFAULT_QUANTIZE_METHOD
if quantize_method == NO_DITHER:
zquantiz_comment = 'No dithering during quantization'
else:
zquantiz_comment = 'Pixel Quantization Algorithm'
self._header.set('ZQUANTIZ',
QUANTIZE_METHOD_NAMES[quantize_method],
zquantiz_comment,
after='ZVAL' + str(idx))
else:
# If the ZQUANTIZ keyword is missing the default is to assume
# no dithering, rather than whatever DEFAULT_QUANTIZE_METHOD
# is set to
quantize_method = self._header.get('ZQUANTIZ', NO_DITHER)
if isinstance(quantize_method, str):
for k, v in QUANTIZE_METHOD_NAMES.items():
if v.upper() == quantize_method:
quantize_method = k
break
else:
quantize_method = NO_DITHER
if quantize_method == NO_DITHER:
if 'ZDITHER0' in self._header:
# If dithering isn't being used then there's no reason to
# keep the ZDITHER0 keyword
del self._header['ZDITHER0']
else:
if dither_seed:
dither_seed = self._generate_dither_seed(dither_seed)
elif 'ZDITHER0' in self._header:
dither_seed = self._header['ZDITHER0']
else:
dither_seed = self._generate_dither_seed(
DEFAULT_DITHER_SEED)
self._header.set('ZDITHER0', dither_seed,
'dithering offset when quantizing floats',
after='ZQUANTIZ')
if image_header:
# Move SIMPLE card from the image header to the
# table header as ZSIMPLE card.
if 'SIMPLE' in image_header:
self._header.set('ZSIMPLE', image_header['SIMPLE'],
image_header.comments['SIMPLE'],
before='ZBITPIX')
# Move EXTEND card from the image header to the
# table header as ZEXTEND card.
if 'EXTEND' in image_header:
self._header.set('ZEXTEND', image_header['EXTEND'],
image_header.comments['EXTEND'])
# Move BLOCKED card from the image header to the
# table header as ZBLOCKED card.
if 'BLOCKED' in image_header:
self._header.set('ZBLOCKED', image_header['BLOCKED'],
image_header.comments['BLOCKED'])
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if 'XTENSION' in image_header:
self._header.set('ZTENSION', 'IMAGE',
image_header.comments['XTENSION'],
before='ZBITPIX')
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if 'PCOUNT' in image_header:
self._header.set('ZPCOUNT', image_header['PCOUNT'],
image_header.comments['PCOUNT'],
after=last_znaxis)
if 'GCOUNT' in image_header:
self._header.set('ZGCOUNT', image_header['GCOUNT'],
image_header.comments['GCOUNT'],
after='ZPCOUNT')
# Move CHECKSUM and DATASUM cards from the image header to the
# table header as ZHECKSUM and ZDATASUM cards.
if 'CHECKSUM' in image_header:
self._header.set('ZHECKSUM', image_header['CHECKSUM'],
image_header.comments['CHECKSUM'])
if 'DATASUM' in image_header:
self._header.set('ZDATASUM', image_header['DATASUM'],
image_header.comments['DATASUM'])
else:
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if 'XTENSION' in self._image_header:
self._header.set('ZTENSION', 'IMAGE',
self._image_header.comments['XTENSION'],
before='ZBITPIX')
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if 'PCOUNT' in self._image_header:
self._header.set('ZPCOUNT', self._image_header['PCOUNT'],
self._image_header.comments['PCOUNT'],
after=last_znaxis)
if 'GCOUNT' in self._image_header:
self._header.set('ZGCOUNT', self._image_header['GCOUNT'],
self._image_header.comments['GCOUNT'],
after='ZPCOUNT')
# When we have an image checksum we need to ensure that the same
# number of blank cards exist in the table header as there were in
# the image header. This allows those blank cards to be carried
# over to the image header when the hdu is uncompressed.
if 'ZHECKSUM' in self._header:
required_blanks = image_header._countblanks()
image_blanks = self._image_header._countblanks()
table_blanks = self._header._countblanks()
for _ in range(required_blanks - image_blanks):
self._image_header.append()
table_blanks += 1
for _ in range(required_blanks - table_blanks):
self._header.append()
@lazyproperty
def data(self):
# The data attribute is the image data (not the table data).
data = compression.decompress_hdu(self)
if data is None:
return data
# Scale the data if necessary
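# (FITS scaling convention: physical value = BZERO + BSCALE * array value)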
if (self._orig_bzero != 0 or self._orig_bscale != 1):
new_dtype = self._dtype_for_bitpix()
data = np.array(data, dtype=new_dtype)
zblank = None
if 'ZBLANK' in self.compressed_data.columns.names:
zblank = self.compressed_data['ZBLANK']
else:
if 'ZBLANK' in self._header:
zblank = np.array(self._header['ZBLANK'], dtype='int32')
elif 'BLANK' in self._header:
zblank = np.array(self._header['BLANK'], dtype='int32')
if zblank is not None:
blanks = (data == zblank)
if self._bscale != 1:
np.multiply(data, self._bscale, data)
if self._bzero != 0:
# We have to explicitly cast self._bzero to prevent numpy from
# raising an error when doing self.data += self._bzero, and we
# do this instead of self.data = self.data + self._bzero to
# avoid doubling memory usage.
np.add(data, self._bzero, out=data, casting='unsafe')
if zblank is not None:
data = np.where(blanks, np.nan, data)
# Right out of _ImageBaseHDU.data
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if (data is not None) and (not isinstance(data, np.ndarray) or
data.dtype.fields is not None):
raise TypeError('CompImageHDU data has incorrect type:{}; '
'dtype.fields = {}'.format(
type(data), data.dtype.fields))
@lazyproperty
def compressed_data(self):
# First we will get the table data (the compressed
# data) from the file, if there is any.
compressed_data = super().data
if isinstance(compressed_data, np.rec.recarray):
# Make sure not to use 'del self.data' so we don't accidentally
# go through the self.data.fdel and close the mmap underlying
# the compressed_data array
del self.__dict__['data']
return compressed_data
else:
# This will actually set self.compressed_data with the
# pre-allocated space for the compression data; this is something I
# might do away with in the future
self._update_compressed_data()
return self.compressed_data
@compressed_data.deleter
def compressed_data(self):
# Deleting the compressed_data attribute has to be handled
# with a little care to prevent a reference leak
# First delete the ._coldefs attributes under it to break a possible
# reference cycle
if 'compressed_data' in self.__dict__:
del self.__dict__['compressed_data']._coldefs
# Now go ahead and delete from self.__dict__; normally
# lazyproperty.__delete__ does this for us, but we can preempt it to
# do some additional cleanup
del self.__dict__['compressed_data']
# If this file was mmap'd, numpy.memmap will hold open a file
# handle until the underlying mmap object is garbage-collected;
# since this reference leak can sometimes hang around longer than
# welcome go ahead and force a garbage collection
gc.collect()
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@lazyproperty
def header(self):
# The header attribute is the header for the image data. It
# is not actually stored in the object dictionary. Instead,
# the _image_header is stored. If the _image_header attribute
# has already been defined we just return it. If not, we must
# create it from the table header (the _header attribute).
if hasattr(self, '_image_header'):
return self._image_header
# Clean up any possible doubled EXTNAME keywords that use
# the default. Do this on the original header to ensure
# duplicates are removed cleanly.
self._remove_unnecessary_default_extnames(self._header)
# Start with a copy of the table header.
image_header = self._header.copy()
# Delete cards that are related to the table. And move
# the values of those cards that relate to the image from
# their corresponding table cards. These include
# ZBITPIX -> BITPIX, ZNAXIS -> NAXIS, and ZNAXISn -> NAXISn.
# (Note: Used set here instead of list in case there are any duplicate
# keywords, which there may be in some pathological cases:
# https://github.com/astropy/astropy/issues/2750)
for keyword in set(image_header):
if CompImageHeader._is_reserved_keyword(keyword, warn=False):
del image_header[keyword]
if 'ZSIMPLE' in self._header:
image_header.set('SIMPLE', self._header['ZSIMPLE'],
self._header.comments['ZSIMPLE'], before=0)
elif 'ZTENSION' in self._header:
if self._header['ZTENSION'] != 'IMAGE':
warnings.warn("ZTENSION keyword in compressed "
"extension != 'IMAGE'", AstropyUserWarning)
image_header.set('XTENSION', 'IMAGE',
self._header.comments['ZTENSION'], before=0)
else:
image_header.set('XTENSION', 'IMAGE', before=0)
image_header.set('BITPIX', self._header['ZBITPIX'],
self._header.comments['ZBITPIX'], before=1)
image_header.set('NAXIS', self._header['ZNAXIS'],
self._header.comments['ZNAXIS'], before=2)
last_naxis = 'NAXIS'
for idx in range(image_header['NAXIS']):
znaxis = 'ZNAXIS' + str(idx + 1)
naxis = znaxis[1:]
image_header.set(naxis, self._header[znaxis],
self._header.comments[znaxis],
after=last_naxis)
last_naxis = naxis
# Delete any other spurious NAXISn keywords:
naxis = image_header['NAXIS']
for keyword in list(image_header['NAXIS?*']):
try:
n = int(keyword[5:])
except Exception:
continue
if n > naxis:
del image_header[keyword]
# Although PCOUNT and GCOUNT are considered mandatory for IMAGE HDUs,
# ZPCOUNT and ZGCOUNT are optional, probably because for IMAGE HDUs
# their values are always 0 and 1 respectively
if 'ZPCOUNT' in self._header:
image_header.set('PCOUNT', self._header['ZPCOUNT'],
self._header.comments['ZPCOUNT'],
after=last_naxis)
else:
image_header.set('PCOUNT', 0, after=last_naxis)
if 'ZGCOUNT' in self._header:
image_header.set('GCOUNT', self._header['ZGCOUNT'],
self._header.comments['ZGCOUNT'],
after='PCOUNT')
else:
image_header.set('GCOUNT', 1, after='PCOUNT')
if 'ZEXTEND' in self._header:
image_header.set('EXTEND', self._header['ZEXTEND'],
self._header.comments['ZEXTEND'])
if 'ZBLOCKED' in self._header:
image_header.set('BLOCKED', self._header['ZBLOCKED'],
self._header.comments['ZBLOCKED'])
# Move the ZHECKSUM and ZDATASUM cards to the image header
# as CHECKSUM and DATASUM
if 'ZHECKSUM' in self._header:
image_header.set('CHECKSUM', self._header['ZHECKSUM'],
self._header.comments['ZHECKSUM'])
if 'ZDATASUM' in self._header:
image_header.set('DATASUM', self._header['ZDATASUM'],
self._header.comments['ZDATASUM'])
# Remove the EXTNAME card if the value in the table header
# is the default value of COMPRESSED_IMAGE.
if ('EXTNAME' in image_header and
image_header['EXTNAME'] == self._default_name):
del image_header['EXTNAME']
# Look to see if there are any blank cards in the table
# header. If there are, there should be the same number
# of blank cards in the image header. Add blank cards to
# the image header to make it so.
table_blanks = self._header._countblanks()
image_blanks = image_header._countblanks()
for _ in range(table_blanks - image_blanks):
image_header.append()
# Create the CompImageHeader that syncs with the table header, and save
# it off to self._image_header so it can be referenced later
# unambiguously
self._image_header = CompImageHeader(self._header, image_header)
return self._image_header
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
_shape, _format = (), ''
else:
# the shape will be in the order of NAXIS's which is the
# reverse of the numarray shape
_shape = list(self.data.shape)
_format = self.data.dtype.name
_shape.reverse()
_shape = tuple(_shape)
_format = _format[_format.rfind('.') + 1:]
# if data is not touched yet, use header info.
else:
_shape = ()
for idx in range(self.header['NAXIS']):
_shape += (self.header['NAXIS' + str(idx + 1)],)
_format = BITPIX2DTYPE[self.header['BITPIX']]
return (self.name, self.ver, class_name, len(self.header), _shape,
_format)
def _update_compressed_data(self):
"""
Compress the image data so that it may be written to a file.
"""
# Check to see that the image_header matches the image data
image_bitpix = DTYPE2BITPIX[self.data.dtype.name]
if image_bitpix != self._orig_bitpix or self.data.shape != self.shape:
self._update_header_data(self.header)
# TODO: This is copied right out of _ImageBaseHDU._writedata_internal;
# it would be cool if we could use an internal ImageHDU and use that to
# write to a buffer for compression or something. See ticket #88
# deal with unsigned integer 16, 32 and 64 data
old_data = self.data
if _is_pseudo_integer(self.data.dtype):
# Convert the unsigned array to signed
self.data = np.array(
self.data - _pseudo_zero(self.data.dtype),
dtype=f'=i{self.data.dtype.itemsize}')
should_swap = False
else:
should_swap = not self.data.dtype.isnative
if should_swap:
if self.data.flags.writeable:
self.data.byteswap(True)
else:
# For read-only arrays, there is no way around making
# a byteswapped copy of the data.
self.data = self.data.byteswap(False)
try:
nrows = self._header['NAXIS2']
tbsize = self._header['NAXIS1'] * nrows
self._header['PCOUNT'] = 0
if 'THEAP' in self._header:
del self._header['THEAP']
self._theap = tbsize
# First delete the original compressed data, if it exists
del self.compressed_data
# Make sure that the data is contiguous otherwise CFITSIO
# will not write the expected data
self.data = np.ascontiguousarray(self.data)
# Compress the data.
# The current implementation of compress_hdu assumes the empty
# compressed data table has already been initialized in
# self.compressed_data, and writes directly to it
# compress_hdu returns the size of the heap for the written
# compressed image table
heapsize, self.compressed_data = compression.compress_hdu(self)
finally:
# if data was byteswapped return it to its original order
if should_swap:
self.data.byteswap(True)
self.data = old_data
# CFITSIO will write the compressed data in big-endian order
dtype = self.columns.dtype.newbyteorder('>')
buf = self.compressed_data
compressed_data = buf[:self._theap].view(dtype=dtype,
type=np.rec.recarray)
self.compressed_data = compressed_data.view(FITS_rec)
self.compressed_data._coldefs = self.columns
self.compressed_data._heapoffset = self._theap
self.compressed_data._heapsize = heapsize
def scale(self, type=None, option='old', bscale=1, bzero=0):
"""
Scale image data by using ``BSCALE`` and ``BZERO``.
Calling this method will scale ``self.data`` and update the keywords of
``BSCALE`` and ``BZERO`` in ``self._header`` and ``self._image_header``.
This method should only be used right before writing to the output
file, as the data will be scaled and is therefore not very usable after
the call.
Parameters
----------
        type : str, optional
            destination data type, use a string representing a numpy dtype
            name (e.g. ``'uint8'``, ``'int16'``, ``'float32'`` etc.). If
            `None`, use the current data type.
option : str, optional
how to scale the data: if ``"old"``, use the original ``BSCALE``
and ``BZERO`` values when the data was read/created. If
``"minmax"``, use the minimum and maximum of the data to scale.
            This option is overridden by any user-specified ``bscale``/``bzero``
            values.
bscale, bzero : int, optional
user specified ``BSCALE`` and ``BZERO`` values.
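        Examples
        --------
        A minimal sketch, assuming ``hdu`` is a `CompImageHDU` holding float
        data (file handling is omitted; illustrative only)::
            >>> hdu.scale('int16', option='minmax')  # doctest: +SKIP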
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = BITPIX2DTYPE[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
        # bscale and bzero take priority
if (bscale != 1 or bzero != 0):
_scale = bscale
_zero = bzero
else:
if option == 'old':
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == 'minmax':
                # _type is a numpy scalar type (a class), so use issubclass
                # rather than isinstance here
                if issubclass(_type, np.floating):
_scale = 1
_zero = 0
else:
_min = np.minimum.reduce(self.data.flat)
_max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = _min
_scale = (_max - _min) / (2. ** 8 - 1)
else:
_zero = (_max + _min) / 2.
                        # throw away -2^N; use np.dtype(_type).itemsize for the
                        # byte size (numpy types have no ``bytes`` attribute)
                        _scale = ((_max - _min) /
                                  (2. ** (8 * np.dtype(_type).itemsize) - 2))
# Do the scaling
if _zero != 0:
            # We have to use unsafe casting when subtracting _zero to prevent
            # numpy from raising an error, and we use the in-place np.subtract
            # instead of self.data = self.data - _zero to
            # avoid doubling memory usage.
np.subtract(self.data, _zero, out=self.data, casting='unsafe')
self.header['BZERO'] = _zero
else:
# Delete from both headers
for header in (self.header, self._header):
with suppress(KeyError):
del header['BZERO']
if _scale != 1:
self.data /= _scale
self.header['BSCALE'] = _scale
else:
for header in (self.header, self._header):
with suppress(KeyError):
del header['BSCALE']
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type) # 0.7.7.1
# Update the BITPIX Card to match the data
self._bitpix = DTYPE2BITPIX[self.data.dtype.name]
self._bzero = self.header.get('BZERO', 0)
self._bscale = self.header.get('BSCALE', 1)
# Update BITPIX for the image header specifically
# TODO: Make this more clear by using self._image_header, but only once
# this has been fixed so that the _image_header attribute is guaranteed
# to be valid
self.header['BITPIX'] = self._bitpix
# Update the table header to match the scaled data
self._update_header_data(self.header)
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self.scale(BITPIX2DTYPE[self._orig_bitpix])
if self._has_data:
self._update_compressed_data()
# Use methods in the superclass to update the header with
# scale/checksum keywords based on the data type of the image data
self._update_pseudo_int_scale_keywords()
# Shove the image header and data into a new ImageHDU and use that
# to compute the image checksum
image_hdu = ImageHDU(data=self.data, header=self.header)
image_hdu._update_checksum(checksum)
if 'CHECKSUM' in image_hdu.header:
# This will also pass through to the ZHECKSUM keyword and
# ZDATASUM keyword
self._image_header.set('CHECKSUM',
image_hdu.header['CHECKSUM'],
image_hdu.header.comments['CHECKSUM'])
if 'DATASUM' in image_hdu.header:
self._image_header.set('DATASUM', image_hdu.header['DATASUM'],
image_hdu.header.comments['DATASUM'])
# Store a temporary backup of self.data in a different attribute;
# see below
self._imagedata = self.data
# Now we need to perform an ugly hack to set the compressed data as
# the .data attribute on the HDU so that the call to _writedata
# handles it properly
self.__dict__['data'] = self.compressed_data
return super()._prewriteto(checksum=checksum, inplace=inplace)
def _writeheader(self, fileobj):
"""
Bypasses `BinTableHDU._writeheader()` which updates the header with
metadata about the data that is meaningless here; another reason
why this class maybe shouldn't inherit directly from BinTableHDU...
"""
return ExtensionHDU._writeheader(self, fileobj)
def _writedata(self, fileobj):
"""
Wrap the basic ``_writedata`` method to restore the ``.data``
attribute to the uncompressed image data in the case of an exception.
"""
try:
return super()._writedata(fileobj)
finally:
# Restore the .data attribute to its rightful value (if any)
if hasattr(self, '_imagedata'):
self.__dict__['data'] = self._imagedata
del self._imagedata
else:
del self.data
def _close(self, closed=True):
super()._close(closed=closed)
# Also make sure to close access to the compressed data mmaps
if (closed and self._data_loaded and
_get_array_mmap(self.compressed_data) is not None):
del self.compressed_data
# TODO: This was copied right out of _ImageBaseHDU; get rid of it once we
# find a way to rewrite this class as either a subclass or wrapper for an
# ImageHDU
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
for bits, dtype in ((16, np.dtype('uint16')),
(32, np.dtype('uint32')),
(64, np.dtype('uint64'))):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype('float64')
elif bitpix > 0: # scale integers to Float32
return np.dtype('float32')
def _update_header_scale_info(self, dtype=None):
if (not self._do_not_scale_image_data and
not (self._orig_bzero == 0 and self._orig_bscale == 1)):
for keyword in ['BSCALE', 'BZERO']:
# Make sure to delete from both the image header and the table
# header; later this will be streamlined
for header in (self.header, self._header):
with suppress(KeyError):
del header[keyword]
# Since _update_header_scale_info can, currently, be
# called *after* _prewriteto(), replace these with
# blank cards so the header size doesn't change
header.append()
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self.header['BITPIX'] = DTYPE2BITPIX[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self.header['BITPIX']
def _generate_dither_seed(self, seed):
if not _is_int(seed):
raise TypeError("Seed must be an integer")
if not -1 <= seed <= 10000:
raise ValueError(
"Seed for random dithering must be either between 1 and "
"10000 inclusive, 0 for autogeneration from the system "
"clock, or -1 for autogeneration from a checksum of the first "
"image tile (got {})".format(seed))
if seed == DITHER_SEED_CHECKSUM:
# Determine the tile dimensions from the ZTILEn keywords
naxis = self._header['ZNAXIS']
tile_dims = [self._header[f'ZTILE{idx + 1}']
for idx in range(naxis)]
tile_dims.reverse()
# Get the first tile by using the tile dimensions as the end
# indices of slices (starting from 0)
first_tile = self.data[tuple(slice(d) for d in tile_dims)]
# The checksum algorithm used is literally just the sum of the bytes
# of the tile data (not its actual floating point values). Integer
# overflow is irrelevant.
csum = first_tile.view(dtype='uint8').sum()
# Since CFITSIO uses an unsigned long (which may be different on
# different platforms) go ahead and truncate the sum to its
# unsigned long value and take the result modulo 10000
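            # For example (illustrative numbers only): if the byte sum of the
            # first tile were 1234567 and no truncation were needed, the
            # returned seed would be (1234567 % 10000) + 1 == 4568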
return (ctypes.c_ulong(csum).value % 10000) + 1
elif seed == DITHER_SEED_CLOCK:
# This isn't exactly the same algorithm as CFITSIO, but that's okay
# since the result is meant to be arbitrary. The primary difference
# is that CFITSIO incorporates the HDU number into the result in
# the hopes of heading off the possibility of the same seed being
# generated for two HDUs at the same time. Here instead we just
# add in the HDU object's id
return ((sum(int(x) for x in math.modf(time.time())) + id(self)) %
10000) + 1
else:
return seed
|
81d1e03552c55b106daeed83f83353ef8c890c40b5dcb52925ad8e481b62268b | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import datetime
import os
import sys
import warnings
from contextlib import suppress
from inspect import signature, Parameter
import numpy as np
from astropy.io.fits import conf
from astropy.io.fits.file import _File
from astropy.io.fits.header import (Header, _BasicHeader, _pad_length,
_DelayedHeader)
from astropy.io.fits.util import (_is_int, _is_pseudo_integer, _pseudo_zero,
itersubclasses, decode_ascii, _get_array_mmap, first,
_free_space_check, _extract_number)
from astropy.io.fits.verify import _Verify, _ErrList
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
__all__ = [
"DELAYED",
# classes
"InvalidHDUException",
"ExtensionHDU",
"NonstandardExtHDU",
]
class _Delayed:
pass
DELAYED = _Delayed()
BITPIX2DTYPE = {8: 'uint8', 16: 'int16', 32: 'int32', 64: 'int64',
-32: 'float32', -64: 'float64'}
"""Maps FITS BITPIX values to Numpy dtype names."""
DTYPE2BITPIX = {'int8': 8, 'uint8': 8, 'int16': 16, 'uint16': 16,
'int32': 32, 'uint32': 32, 'int64': 64, 'uint64': 64,
'float32': -32, 'float64': -64}
"""
Maps Numpy dtype names to FITS BITPIX values (this includes unsigned
integers, with the assumption that the pseudo-unsigned integer convention
will be used in this case).
"""
class InvalidHDUException(Exception):
"""
A custom exception class used mainly to signal to _BaseHDU.__new__ that
an HDU cannot possibly be considered valid, and must be assumed to be
corrupted.
"""
def _hdu_class_from_header(cls, header):
"""
Iterates through the subclasses of _BaseHDU and uses that class's
match_header() method to determine which subclass to instantiate.
It's important to be aware that the class hierarchy is traversed in a
depth-last order. Each match_header() should identify an HDU type as
uniquely as possible. Abstract types may choose to simply return False
or raise NotImplementedError to be skipped.
If any unexpected exceptions are raised while evaluating
match_header(), the type is taken to be _CorruptedHDU.
Used primarily by _BaseHDU._readfrom_internal and _BaseHDU._from_data to
find an appropriate HDU class to use based on values in the header.
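    A rough usage sketch (the class and keyword below are hypothetical): a
    user-defined HDU type only takes part in this matching once it has been
    registered::
        >>> class MyHDU(fits.ImageHDU):  # doctest: +SKIP
        ...     @classmethod
        ...     def match_header(cls, header):
        ...         return header.get('MYKEY') == 'MYVAL'
        >>> fits.register_hdu(MyHDU)  # doctest: +SKIP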
"""
klass = cls # By default, if no subclasses are defined
if header:
for c in reversed(list(itersubclasses(cls))):
try:
# HDU classes built into astropy.io.fits are always considered,
# but extension HDUs must be explicitly registered
if not (c.__module__.startswith('astropy.io.fits.') or
c in cls._hdu_registry):
continue
if c.match_header(header):
klass = c
break
except NotImplementedError:
continue
except Exception as exc:
warnings.warn(
'An exception occurred matching an HDU header to the '
'appropriate HDU type: {}'.format(exc),
AstropyUserWarning)
warnings.warn('The HDU will be treated as corrupted.',
AstropyUserWarning)
klass = _CorruptedHDU
del exc
break
return klass
# TODO: Come up with a better __repr__ for HDUs (and for HDULists, for that
# matter)
class _BaseHDU:
"""Base class for all HDU (header data unit) classes."""
_hdu_registry = set()
# This HDU type is part of the FITS standard
_standard = True
# Byte to use for padding out blocks
_padding_byte = '\x00'
_default_name = ''
# _header uses a descriptor to delay the loading of the fits.Header object
# until it is necessary.
_header = _DelayedHeader()
def __init__(self, data=None, header=None, *args, **kwargs):
if header is None:
header = Header()
self._header = header
self._header_str = None
self._file = None
self._buffer = None
self._header_offset = None
self._data_offset = None
self._data_size = None
# This internal variable is used to track whether the data attribute
# still points to the same data array as when the HDU was originally
# created (this does not track whether the data is actually the same
# content-wise)
self._data_replaced = False
self._data_needs_rescale = False
self._new = True
self._output_checksum = False
if 'DATASUM' in self._header and 'CHECKSUM' not in self._header:
self._output_checksum = 'datasum'
elif 'CHECKSUM' in self._header:
self._output_checksum = True
def __init_subclass__(cls, **kwargs):
# Add the same data.deleter to all HDUs with a data property.
# It's unfortunate, but there's otherwise no straightforward way
# that a property can inherit setters/deleters of the property of the
# same name on base classes.
data_prop = cls.__dict__.get('data', None)
if (isinstance(data_prop, (lazyproperty, property))
and data_prop.fdel is None):
# Don't do anything if the class has already explicitly
# set the deleter for its data property
def data(self):
# The deleter
if self._file is not None and self._data_loaded:
data_refcount = sys.getrefcount(self.data)
# Manually delete *now* so that FITS_rec.__del__
# cleanup can happen if applicable
del self.__dict__['data']
# Don't even do this unless the *only* reference to the
# .data array was the one we're deleting by deleting
# this attribute; if any other references to the array
# are hanging around (perhaps the user ran ``data =
# hdu.data``) don't even consider this:
if data_refcount == 2:
self._file._maybe_close_mmap()
setattr(cls, 'data', data_prop.deleter(data))
return super().__init_subclass__(**kwargs)
@property
def header(self):
return self._header
@header.setter
def header(self, value):
self._header = value
@property
def name(self):
# Convert the value to a string to be flexible in some pathological
# cases (see ticket #96)
return str(self._header.get('EXTNAME', self._default_name))
@name.setter
def name(self, value):
if not isinstance(value, str):
raise TypeError("'name' attribute must be a string")
if not conf.extension_name_case_sensitive:
value = value.upper()
if 'EXTNAME' in self._header:
self._header['EXTNAME'] = value
else:
self._header['EXTNAME'] = (value, 'extension name')
@property
def ver(self):
return self._header.get('EXTVER', 1)
@ver.setter
def ver(self, value):
if not _is_int(value):
raise TypeError("'ver' attribute must be an integer")
if 'EXTVER' in self._header:
self._header['EXTVER'] = value
else:
self._header['EXTVER'] = (value, 'extension value')
@property
def level(self):
return self._header.get('EXTLEVEL', 1)
@level.setter
def level(self, value):
if not _is_int(value):
raise TypeError("'level' attribute must be an integer")
if 'EXTLEVEL' in self._header:
self._header['EXTLEVEL'] = value
else:
self._header['EXTLEVEL'] = (value, 'extension level')
@property
def is_image(self):
return (
self.name == 'PRIMARY' or
('XTENSION' in self._header and
(self._header['XTENSION'] == 'IMAGE' or
(self._header['XTENSION'] == 'BINTABLE' and
'ZIMAGE' in self._header and self._header['ZIMAGE'] is True))))
@property
def _data_loaded(self):
return ('data' in self.__dict__ and self.data is not DELAYED)
@property
def _has_data(self):
return self._data_loaded and self.data is not None
@classmethod
def register_hdu(cls, hducls):
cls._hdu_registry.add(hducls)
@classmethod
def unregister_hdu(cls, hducls):
if hducls in cls._hdu_registry:
cls._hdu_registry.remove(hducls)
@classmethod
def match_header(cls, header):
raise NotImplementedError
@classmethod
def fromstring(cls, data, checksum=False, ignore_missing_end=False,
**kwargs):
"""
Creates a new HDU object of the appropriate type from a string
containing the HDU's entire header and, optionally, its data.
Note: When creating a new HDU from a string without a backing file
object, the data of that HDU may be read-only. It depends on whether
the underlying string was an immutable Python str/bytes object, or some
kind of read-write memory buffer such as a `memoryview`.
Parameters
----------
data : str, bytearray, memoryview, ndarray
A byte string containing the HDU's header and data.
checksum : bool, optional
Check the HDU's checksum and/or datasum.
ignore_missing_end : bool, optional
            Ignore a missing end card in the header data. Note that without the
            end card the end of the header may be ambiguous and result in a
            corrupt HDU. In this case the assumption is that the first
            2880-byte block that does not begin with valid FITS header data is
            the beginning of the data.
**kwargs : optional
May consist of additional keyword arguments specific to an HDU
type--these correspond to keywords recognized by the constructors of
different HDU classes such as `PrimaryHDU`, `ImageHDU`, or
`BinTableHDU`. Any unrecognized keyword arguments are simply
ignored.
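        Examples
        --------
        A minimal sketch, assuming ``buf`` is a bytes object holding a complete
        HDU (header plus any data)::
            >>> from astropy.io import fits
            >>> hdu = fits.ImageHDU.fromstring(buf)  # doctest: +SKIP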
"""
return cls._readfrom_internal(data, checksum=checksum,
ignore_missing_end=ignore_missing_end,
**kwargs)
@classmethod
def readfrom(cls, fileobj, checksum=False, ignore_missing_end=False,
**kwargs):
"""
Read the HDU from a file. Normally an HDU should be opened with
:func:`open` which reads the entire HDU list in a FITS file. But this
method is still provided for symmetry with :func:`writeto`.
Parameters
----------
fileobj : file-like
Input FITS file. The file's seek pointer is assumed to be at the
beginning of the HDU.
checksum : bool
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card
values (when present in the HDU header) match the header and data
of all HDU's in the file.
ignore_missing_end : bool
Do not issue an exception when opening a file that is missing an
``END`` card in the last header.
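        Examples
        --------
        A minimal sketch (the file name is hypothetical)::
            >>> with open('example.fits', 'rb') as f:  # doctest: +SKIP
            ...     hdu = fits.PrimaryHDU.readfrom(f)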
"""
# TODO: Figure out a way to make it possible for the _File
# constructor to be a noop if the argument is already a _File
if not isinstance(fileobj, _File):
fileobj = _File(fileobj)
hdu = cls._readfrom_internal(fileobj, checksum=checksum,
ignore_missing_end=ignore_missing_end,
**kwargs)
# If the checksum had to be checked the data may have already been read
# from the file, in which case we don't want to seek relative
fileobj.seek(hdu._data_offset + hdu._data_size, os.SEEK_SET)
return hdu
def writeto(self, name, output_verify='exception', overwrite=False,
checksum=False):
"""
        Write the HDU to a new file. This is a convenience method that
        provides a simpler output interface when only one HDU needs
        to be written to a file.
Parameters
----------
name : path-like or file-like
Output FITS file. If the file object is already opened, it must
be opened in a writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
to the header of the HDU when written to the file.
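        Examples
        --------
        A minimal sketch (the file name and data are illustrative)::
            >>> hdu = fits.PrimaryHDU(data=np.zeros((10, 10)))  # doctest: +SKIP
            >>> hdu.writeto('example.fits', overwrite=True)  # doctest: +SKIP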
"""
from .hdulist import HDUList
hdulist = HDUList([self])
hdulist.writeto(name, output_verify, overwrite=overwrite,
checksum=checksum)
@classmethod
def _from_data(cls, data, header, **kwargs):
"""
Instantiate the HDU object after guessing the HDU class from the
FITS Header.
"""
klass = _hdu_class_from_header(cls, header)
return klass(data=data, header=header, **kwargs)
@classmethod
def _readfrom_internal(cls, data, header=None, checksum=False,
ignore_missing_end=False, **kwargs):
"""
Provides the bulk of the internal implementation for readfrom and
fromstring.
For some special cases, supports using a header that was already
created, and just using the input data for the actual array data.
"""
hdu_buffer = None
hdu_fileobj = None
header_offset = 0
if isinstance(data, _File):
if header is None:
header_offset = data.tell()
try:
# First we try to read the header with the fast parser
# from _BasicHeader, which will read only the standard
# 8 character keywords to get the structural keywords
# that are needed to build the HDU object.
header_str, header = _BasicHeader.fromfile(data)
except Exception:
# If the fast header parsing failed, then fallback to
# the classic Header parser, which has better support
# and reporting for the various issues that can be found
# in the wild.
data.seek(header_offset)
header = Header.fromfile(data,
endcard=not ignore_missing_end)
hdu_fileobj = data
data_offset = data.tell() # *after* reading the header
else:
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype='ubyte', buffer=data)
except TypeError:
raise TypeError(
'The provided object {!r} does not contain an underlying '
'memory buffer. fromstring() requires an object that '
'supports the buffer interface such as bytes, buffer, '
'memoryview, ndarray, etc. This restriction is to ensure '
'that efficient access to the array/table data is possible.'
.format(data))
if header is None:
def block_iter(nbytes):
idx = 0
while idx < len(data):
yield data[idx:idx + nbytes]
idx += nbytes
header_str, header = Header._from_blocks(
block_iter, True, '', not ignore_missing_end, True)
if len(data) > len(header_str):
hdu_buffer = data
elif data:
hdu_buffer = data
header_offset = 0
data_offset = len(header_str)
# Determine the appropriate arguments to pass to the constructor from
# self._kwargs. self._kwargs contains any number of optional arguments
# that may or may not be valid depending on the HDU type
cls = _hdu_class_from_header(cls, header)
sig = signature(cls.__init__)
new_kwargs = kwargs.copy()
if Parameter.VAR_KEYWORD not in (x.kind for x in sig.parameters.values()):
# If __init__ accepts arbitrary keyword arguments, then we can go
# ahead and pass all keyword arguments; otherwise we need to delete
# any that are invalid
for key in kwargs:
if key not in sig.parameters:
del new_kwargs[key]
try:
hdu = cls(data=DELAYED, header=header, **new_kwargs)
except TypeError:
# This may happen because some HDU class (e.g. GroupsHDU) wants
# to set a keyword on the header, which is not possible with the
# _BasicHeader. While HDU classes should not need to modify the
# header in general, sometimes this is needed to fix it. So in
# this case we build a full Header and try again to create the
# HDU object.
if isinstance(header, _BasicHeader):
header = Header.fromstring(header_str)
hdu = cls(data=DELAYED, header=header, **new_kwargs)
else:
raise
# One of these may be None, depending on whether the data came from a
# file or a string buffer--later this will be further abstracted
hdu._file = hdu_fileobj
hdu._buffer = hdu_buffer
hdu._header_offset = header_offset # beginning of the header area
hdu._data_offset = data_offset # beginning of the data area
# data area size, including padding
size = hdu.size
hdu._data_size = size + _pad_length(size)
if isinstance(hdu._header, _BasicHeader):
# Delete the temporary _BasicHeader.
# We need to do this before an eventual checksum computation,
# since it needs to modify temporarily the header
#
# The header string is stored in the HDU._header_str attribute,
# so that it can be used directly when we need to create the
# classic Header object, without having to parse again the file.
del hdu._header
hdu._header_str = header_str
# Checksums are not checked on invalid HDU types
if checksum and checksum != 'remove' and isinstance(hdu, _ValidHDU):
hdu._verify_checksum_datasum()
return hdu
def _get_raw_data(self, shape, code, offset):
"""
Return raw array from either the HDU's memory buffer or underlying
file.
"""
if isinstance(shape, int):
shape = (shape,)
if self._buffer:
return np.ndarray(shape, dtype=code, buffer=self._buffer,
offset=offset)
elif self._file:
return self._file.readarray(offset=offset, dtype=code, shape=shape)
else:
return None
# TODO: Rework checksum handling so that it's not necessary to add a
# checksum argument here
# TODO: The BaseHDU class shouldn't even handle checksums since they're
# only implemented on _ValidHDU...
def _prewriteto(self, checksum=False, inplace=False):
self._update_pseudo_int_scale_keywords()
# Handle checksum
self._update_checksum(checksum)
def _update_pseudo_int_scale_keywords(self):
"""
If the data is signed int 8, unsigned int 16, 32, or 64,
add BSCALE/BZERO cards to header.
"""
if (self._has_data and self._standard and
_is_pseudo_integer(self.data.dtype)):
# CompImageHDUs need TFIELDS immediately after GCOUNT,
# so BSCALE has to go after TFIELDS if it exists.
if 'TFIELDS' in self._header:
self._header.set('BSCALE', 1, after='TFIELDS')
elif 'GCOUNT' in self._header:
self._header.set('BSCALE', 1, after='GCOUNT')
else:
self._header.set('BSCALE', 1)
self._header.set('BZERO', _pseudo_zero(self.data.dtype),
after='BSCALE')
def _update_checksum(self, checksum, checksum_keyword='CHECKSUM',
datasum_keyword='DATASUM'):
"""Update the 'CHECKSUM' and 'DATASUM' keywords in the header (or
keywords with equivalent semantics given by the ``checksum_keyword``
        and ``datasum_keyword`` arguments--see ``CompImageHDU`` for an
        example of why this might need to be overridden).
"""
# If the data is loaded it isn't necessarily 'modified', but we have no
# way of knowing for sure
modified = self._header._modified or self._data_loaded
if checksum == 'remove':
if checksum_keyword in self._header:
del self._header[checksum_keyword]
if datasum_keyword in self._header:
del self._header[datasum_keyword]
elif (modified or self._new or
(checksum and ('CHECKSUM' not in self._header or
'DATASUM' not in self._header or
not self._checksum_valid or
not self._datasum_valid))):
if checksum == 'datasum':
self.add_datasum(datasum_keyword=datasum_keyword)
elif checksum:
self.add_checksum(checksum_keyword=checksum_keyword,
datasum_keyword=datasum_keyword)
def _postwriteto(self):
# If data is unsigned integer 16, 32 or 64, remove the
# BSCALE/BZERO cards
if (self._has_data and self._standard and
_is_pseudo_integer(self.data.dtype)):
for keyword in ('BSCALE', 'BZERO'):
with suppress(KeyError):
del self._header[keyword]
def _writeheader(self, fileobj):
offset = 0
with suppress(AttributeError, OSError):
offset = fileobj.tell()
self._header.tofile(fileobj)
try:
size = fileobj.tell() - offset
except (AttributeError, OSError):
size = len(str(self._header))
return offset, size
def _writedata(self, fileobj):
size = 0
fileobj.flush()
try:
offset = fileobj.tell()
except (AttributeError, OSError):
offset = 0
if self._data_loaded or self._data_needs_rescale:
if self.data is not None:
size += self._writedata_internal(fileobj)
# pad the FITS data block
# to avoid a bug in the lustre filesystem client, don't
# write zero-byte objects
if size > 0 and _pad_length(size) > 0:
padding = _pad_length(size) * self._padding_byte
# TODO: Not that this is ever likely, but if for some odd
# reason _padding_byte is > 0x80 this will fail; but really if
# somebody's custom fits format is doing that, they're doing it
# wrong and should be reprimanded harshly.
fileobj.write(padding.encode('ascii'))
size += len(padding)
else:
            # The data has not been modified or does not need to be
# rescaled, so it can be copied, unmodified, directly from an
# existing file or buffer
size += self._writedata_direct_copy(fileobj)
# flush, to make sure the content is written
fileobj.flush()
# return both the location and the size of the data area
return offset, size
def _writedata_internal(self, fileobj):
"""
The beginning and end of most _writedata() implementations are the
same, but the details of writing the data array itself can vary between
HDU types, so that should be implemented in this method.
Should return the size in bytes of the data written.
"""
fileobj.writearray(self.data)
return self.data.size * self.data.itemsize
def _writedata_direct_copy(self, fileobj):
"""Copies the data directly from one file/buffer to the new file.
For now this is handled by loading the raw data from the existing data
(including any padding) via a memory map or from an already in-memory
buffer and using Numpy's existing file-writing facilities to write to
the new file.
If this proves too slow a more direct approach may be used.
"""
raw = self._get_raw_data(self._data_size, 'ubyte', self._data_offset)
if raw is not None:
fileobj.writearray(raw)
return raw.nbytes
else:
return 0
# TODO: This is the start of moving HDU writing out of the _File class;
# Though right now this is an internal private method (though still used by
    # HDUList), eventually the plan is to have this be moved into writeto()
# somehow...
def _writeto(self, fileobj, inplace=False, copy=False):
try:
dirname = os.path.dirname(fileobj._file.name)
except (AttributeError, TypeError):
dirname = None
with _free_space_check(self, dirname):
self._writeto_internal(fileobj, inplace, copy)
def _writeto_internal(self, fileobj, inplace, copy):
# For now fileobj is assumed to be a _File object
if not inplace or self._new:
header_offset, _ = self._writeheader(fileobj)
data_offset, data_size = self._writedata(fileobj)
# Set the various data location attributes on newly-written HDUs
if self._new:
self._header_offset = header_offset
self._data_offset = data_offset
self._data_size = data_size
return
hdrloc = self._header_offset
hdrsize = self._data_offset - self._header_offset
datloc = self._data_offset
datsize = self._data_size
if self._header._modified:
# Seek to the original header location in the file
self._file.seek(hdrloc)
            # This should update hdrloc with the header location in the new file
hdrloc, hdrsize = self._writeheader(fileobj)
# If the data is to be written below with self._writedata, that
# will also properly update the data location; but it should be
# updated here too
datloc = hdrloc + hdrsize
elif copy:
# Seek to the original header location in the file
self._file.seek(hdrloc)
# Before writing, update the hdrloc with the current file position,
# which is the hdrloc for the new file
hdrloc = fileobj.tell()
fileobj.write(self._file.read(hdrsize))
# The header size is unchanged, but the data location may be
# different from before depending on if previous HDUs were resized
datloc = fileobj.tell()
if self._data_loaded:
if self.data is not None:
                # Seek through the array's bases for a memmap'd array; we
# can't rely on the _File object to give us this info since
# the user may have replaced the previous mmap'd array
if copy or self._data_replaced:
# Of course, if we're copying the data to a new file
# we don't care about flushing the original mmap;
# instead just read it into the new file
array_mmap = None
else:
array_mmap = _get_array_mmap(self.data)
if array_mmap is not None:
array_mmap.flush()
else:
self._file.seek(self._data_offset)
datloc, datsize = self._writedata(fileobj)
elif copy:
datsize = self._writedata_direct_copy(fileobj)
self._header_offset = hdrloc
self._data_offset = datloc
self._data_size = datsize
self._data_replaced = False
def _close(self, closed=True):
# If the data was mmap'd, close the underlying mmap (this will
# prevent any future access to the .data attribute if there are
        # no other references to it; if there are other references then
        # it is up to the user to clean those up)
if (closed and self._data_loaded and
_get_array_mmap(self.data) is not None):
del self.data
# For backwards-compatibility, though nobody should have
# been using this directly:
_AllHDU = _BaseHDU
# For convenience...
# TODO: register_hdu could be made into a class decorator which would be pretty
# cool, but only once 2.6 support is dropped.
register_hdu = _BaseHDU.register_hdu
unregister_hdu = _BaseHDU.unregister_hdu
class _CorruptedHDU(_BaseHDU):
"""
A Corrupted HDU class.
This class is used when one or more mandatory `Card`s are
corrupted (unparsable), such as the ``BITPIX``, ``NAXIS``, or
``END`` cards. A corrupted HDU usually means that the data size
cannot be calculated or the ``END`` card is not found. In the case
of a missing ``END`` card, the `Header` may also contain the binary
    data.
.. note::
In future, it may be possible to decipher where the last block
of the `Header` ends, but this task may be difficult when the
extension is a `TableHDU` containing ASCII data.
"""
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
# Note: On compressed files this might report a negative size; but the
# file is corrupt anyways so I'm not too worried about it.
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _summary(self):
return (self.name, self.ver, 'CorruptedHDU')
def verify(self):
pass
class _NonstandardHDU(_BaseHDU, _Verify):
"""
A Non-standard HDU class.
This class is used for a Primary HDU when the ``SIMPLE`` Card has
a value of `False`. A non-standard HDU comes from a file that
resembles a FITS file but departs from the standards in some
significant way. One example would be files where the numbers are
in the DEC VAX internal storage format rather than the standard
FITS most significant byte first. The header for this HDU should
be valid. The data for this HDU is read from the file as a byte
stream that begins at the first byte after the header ``END`` card
and continues until the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any HDU that has the 'SIMPLE' keyword but is not a standard
Primary or Groups HDU.
"""
# The SIMPLE keyword must be in the first card
card = header.cards[0]
# The check that 'GROUPS' is missing is a bit redundant, since the
# match_header for GroupsHDU will always be called before this one.
if card.keyword == 'SIMPLE':
if 'GROUPS' not in header and card.value is False:
return True
else:
raise InvalidHDUException
else:
return False
@property
def size(self):
"""
Returns the size (in bytes) of the HDU's data part.
"""
if self._buffer is not None:
return len(self._buffer) - self._data_offset
return self._file.size - self._data_offset
def _writedata(self, fileobj):
"""
Differs from the base class :class:`_writedata` in that it doesn't
automatically add padding, and treats the data as a string of raw bytes
instead of an array.
"""
offset = 0
size = 0
fileobj.flush()
try:
offset = fileobj.tell()
except OSError:
offset = 0
if self.data is not None:
fileobj.write(self.data)
# flush, to make sure the content is written
fileobj.flush()
size = len(self.data)
# return both the location and the size of the data area
return offset, size
def _summary(self):
return (self.name, self.ver, 'NonstandardHDU', len(self._header))
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, 'ubyte', self._data_offset)
def _verify(self, option='warn'):
errs = _ErrList([], unit='Card')
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
class _ValidHDU(_BaseHDU, _Verify):
"""
Base class for all HDUs which are not corrupted.
"""
def __init__(self, data=None, header=None, name=None, ver=None, **kwargs):
super().__init__(data=data, header=header)
if (header is not None and
not isinstance(header, (Header, _BasicHeader))):
# TODO: Instead maybe try initializing a new Header object from
# whatever is passed in as the header--there are various types
# of objects that could work for this...
raise ValueError('header must be a Header object')
# NOTE: private data members _checksum and _datasum are used by the
# utility script "fitscheck" to detect missing checksums.
self._checksum = None
self._checksum_valid = None
self._datasum = None
self._datasum_valid = None
if name is not None:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
Matches any HDU that is not recognized as having either the SIMPLE or
XTENSION keyword in its header's first card, but is nonetheless not
corrupted.
TODO: Maybe it would make more sense to use _NonstandardHDU in this
case? Not sure...
"""
return first(header.keys()) not in ('SIMPLE', 'XTENSION')
@property
def size(self):
"""
Size (in bytes) of the data portion of the HDU.
"""
return self._header.data_size
def filebytes(self):
"""
Calculates and returns the number of bytes that this HDU will write to
a file.
"""
f = _File()
# TODO: Fix this once new HDU writing API is settled on
return self._writeheader(f)[1] + self._writedata(f)[1]
def fileinfo(self):
"""
Returns a dictionary detailing information about the locations
of this HDU within any associated file. The values are only
valid after a read or write of the associated file with no
intervening changes to the `HDUList`.
Returns
-------
dict or None
The dictionary details information about the locations of
this HDU within an associated file. Returns `None` when
the HDU is not associated with a file.
Dictionary contents:
========== ================================================
Key Value
========== ================================================
file File object associated with the HDU
filemode Mode in which the file was opened (readonly, copyonwrite,
update, append, ostream)
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ================================================
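        A minimal usage sketch, assuming ``hdul`` was returned by
        ``fits.open`` on an existing file::
            >>> info = hdul[0].fileinfo()  # doctest: +SKIP
            >>> info['hdrLoc'], info['datLoc'], info['datSpan']  # doctest: +SKIP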
"""
if hasattr(self, '_file') and self._file:
return {'file': self._file, 'filemode': self._file.mode,
'hdrLoc': self._header_offset, 'datLoc': self._data_offset,
'datSpan': self._data_size}
else:
return None
def copy(self):
"""
Make a copy of the HDU, both header and data are copied.
"""
if self.data is not None:
data = self.data.copy()
else:
data = None
return self.__class__(data=data, header=self._header.copy())
def _verify(self, option='warn'):
errs = _ErrList([], unit='Card')
is_valid = BITPIX2DTYPE.__contains__
# Verify location and value of mandatory keywords.
# Do the first card here, instead of in the respective HDU classes, so
# the checking is in order, in case of required cards in wrong order.
if isinstance(self, ExtensionHDU):
firstkey = 'XTENSION'
firstval = self._extension
else:
firstkey = 'SIMPLE'
firstval = True
self.req_cards(firstkey, 0, None, firstval, option, errs)
self.req_cards('BITPIX', 1, lambda v: (_is_int(v) and is_valid(v)), 8,
option, errs)
self.req_cards('NAXIS', 2,
lambda v: (_is_int(v) and 0 <= v <= 999), 0,
option, errs)
naxis = self._header.get('NAXIS', 0)
if naxis < 1000:
for ax in range(3, naxis + 3):
key = 'NAXIS' + str(ax - 2)
self.req_cards(key, ax,
lambda v: (_is_int(v) and v >= 0),
_extract_number(self._header[key], default=1),
option, errs)
# Remove NAXISj cards where j is not in range 1, naxis inclusive.
for keyword in self._header:
if keyword.startswith('NAXIS') and len(keyword) > 5:
try:
number = int(keyword[5:])
if number <= 0 or number > naxis:
raise ValueError
except ValueError:
err_text = ("NAXISj keyword out of range ('{}' when "
"NAXIS == {})".format(keyword, naxis))
def fix(self=self, keyword=keyword):
del self._header[keyword]
errs.append(
self.run_option(option=option, err_text=err_text,
fix=fix, fix_text="Deleted."))
# Verify that the EXTNAME keyword exists and is a string
if 'EXTNAME' in self._header:
if not isinstance(self._header['EXTNAME'], str):
err_text = 'The EXTNAME keyword must have a string value.'
fix_text = 'Converted the EXTNAME keyword to a string value.'
def fix(header=self._header):
header['EXTNAME'] = str(header['EXTNAME'])
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# verify each card
for card in self._header.cards:
errs.append(card._verify(option))
return errs
# TODO: Improve this API a little bit--for one, most of these arguments
# could be optional
def req_cards(self, keyword, pos, test, fix_value, option, errlist):
"""
Check the existence, location, and value of a required `Card`.
Parameters
----------
keyword : str
The keyword to validate
pos : int, callable
If an ``int``, this specifies the exact location this card should
have in the header. Remember that Python is zero-indexed, so this
means ``pos=0`` requires the card to be the first card in the
header. If given a callable, it should take one argument--the
actual position of the keyword--and return `True` or `False`. This
can be used for custom evaluation. For example if
``pos=lambda idx: idx > 10`` this will check that the keyword's
index is greater than 10.
test : callable
This should be a callable (generally a function) that is passed the
value of the given keyword and returns `True` or `False`. This can
be used to validate the value associated with the given keyword.
fix_value : str, int, float, complex, bool, None
            A valid value for a FITS keyword to use if the given ``test``
fails to replace an invalid value. In other words, this provides
a default value to use as a replacement if the keyword's current
value is invalid. If `None`, there is no replacement value and the
keyword is unfixable.
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
errlist : list
A list of validation errors already found in the FITS file; this is
used primarily for the validation system to collect errors across
multiple HDUs and multiple calls to `req_cards`.
Notes
-----
If ``pos=None``, the card can be anywhere in the header. If the card
does not exist, the new card will have the ``fix_value`` as its value
when created. Also check the card's value by using the ``test``
argument.
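        Examples
        --------
        A sketch of how an HDU's ``_verify`` typically calls this method
        (here ``option`` and ``errs`` are the verification option and the
        ``_ErrList`` collecting results)::
            >>> self.req_cards('NAXIS', 2,
            ...                lambda v: (_is_int(v) and 0 <= v <= 999), 0,
            ...                option, errs)  # doctest: +SKIP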
"""
errs = errlist
fix = None
try:
index = self._header.index(keyword)
except ValueError:
index = None
fixable = fix_value is not None
insert_pos = len(self._header) + 1
# If pos is an int, insert at the given position (and convert it to a
# lambda)
if _is_int(pos):
insert_pos = pos
pos = lambda x: x == insert_pos
# if the card does not exist
if index is None:
err_text = f"'{keyword}' card does not exist."
fix_text = f"Fixed by inserting a new '{keyword}' card."
if fixable:
                # fix_value may be a string or a non-string type (booleans
                # are also OK); the Card constructor accepts any of these
card = (keyword, fix_value)
def fix(self=self, insert_pos=insert_pos, card=card):
self._header.insert(insert_pos, card)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix, fixable=fixable))
else:
# if the supposed location is specified
if pos is not None:
if not pos(index):
err_text = f"'{keyword}' card at the wrong place (card {index})."
fix_text = f"Fixed by moving it to the right place (card {insert_pos})."
def fix(self=self, index=index, insert_pos=insert_pos):
card = self._header.cards[index]
del self._header[index]
self._header.insert(insert_pos, card)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# if value checking is specified
if test:
val = self._header[keyword]
if not test(val):
err_text = f"'{keyword}' card has invalid value '{val}'."
fix_text = f"Fixed by setting a new value '{fix_value}'."
if fixable:
def fix(self=self, keyword=keyword, val=fix_value):
self._header[keyword] = fix_value
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix, fixable=fixable))
return errs
def add_datasum(self, when=None, datasum_keyword='DATASUM'):
"""
Add the ``DATASUM`` card to this HDU with the value set to the
checksum calculated for the data.
Parameters
----------
when : str, optional
Comment string for the card that by default represents the
time when the checksum was calculated
datasum_keyword : str, optional
The name of the header keyword to store the datasum value in;
this is typically 'DATASUM' per convention, but there exist
use cases in which a different keyword should be used
Returns
-------
checksum : int
The calculated datasum
Notes
-----
        For testing purposes, provide a ``when`` argument so that the comment
        value in the card remains consistent. This enables the
        generation of a ``CHECKSUM`` card with a consistent value.
"""
cs = self._calculate_datasum()
if when is None:
when = f'data unit checksum updated {self._get_timestamp()}'
self._header[datasum_keyword] = (str(cs), when)
return cs
def add_checksum(self, when=None, override_datasum=False,
checksum_keyword='CHECKSUM', datasum_keyword='DATASUM'):
"""
Add the ``CHECKSUM`` and ``DATASUM`` cards to this HDU with
the values set to the checksum calculated for the HDU and the
data respectively. The addition of the ``DATASUM`` card may
be overridden.
Parameters
----------
when : str, optional
comment string for the cards; by default the comments
will represent the time when the checksum was calculated
override_datasum : bool, optional
add the ``CHECKSUM`` card only
checksum_keyword : str, optional
The name of the header keyword to store the checksum value in; this
is typically 'CHECKSUM' per convention, but there exist use cases
in which a different keyword should be used
datasum_keyword : str, optional
See ``checksum_keyword``
Notes
-----
For testing purposes, first call `add_datasum` with a ``when``
argument, then call `add_checksum` with a ``when`` argument and
``override_datasum`` set to `True`. This will provide consistent
comments for both cards and enable the generation of a ``CHECKSUM``
card with a consistent value.
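        Examples
        --------
        A sketch of producing reproducible checksum cards for testing, as
        described above::
            >>> hdu.add_datasum(when='fixed comment')  # doctest: +SKIP
            >>> hdu.add_checksum(when='fixed comment',
            ...                  override_datasum=True)  # doctest: +SKIP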
"""
if not override_datasum:
# Calculate and add the data checksum to the header.
data_cs = self.add_datasum(when, datasum_keyword=datasum_keyword)
else:
# Just calculate the data checksum
data_cs = self._calculate_datasum()
if when is None:
when = f'HDU checksum updated {self._get_timestamp()}'
# Add the CHECKSUM card to the header with a value of all zeros.
if datasum_keyword in self._header:
self._header.set(checksum_keyword, '0' * 16, when,
before=datasum_keyword)
else:
self._header.set(checksum_keyword, '0' * 16, when)
csum = self._calculate_checksum(data_cs,
checksum_keyword=checksum_keyword)
self._header[checksum_keyword] = csum
def verify_datasum(self):
"""
Verify that the value in the ``DATASUM`` keyword matches the value
calculated for the ``DATASUM`` of the current HDU data.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``DATASUM`` keyword present
"""
if 'DATASUM' in self._header:
datasum = self._calculate_datasum()
if datasum == int(self._header['DATASUM']):
return 1
else:
# Failed
return 0
else:
return 2
def verify_checksum(self):
"""
Verify that the value in the ``CHECKSUM`` keyword matches the
value calculated for the current HDU CHECKSUM.
Returns
-------
valid : int
- 0 - failure
- 1 - success
- 2 - no ``CHECKSUM`` keyword present
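        Examples
        --------
        A minimal usage sketch::
            >>> if hdu.verify_checksum() == 0:  # doctest: +SKIP
            ...     print('CHECKSUM does not match')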
"""
if 'CHECKSUM' in self._header:
if 'DATASUM' in self._header:
datasum = self._calculate_datasum()
else:
datasum = 0
checksum = self._calculate_checksum(datasum)
if checksum == self._header['CHECKSUM']:
return 1
else:
# Failed
return 0
else:
return 2
def _verify_checksum_datasum(self):
"""
Verify the checksum/datasum values if the cards exist in the header.
Simply displays warnings if either the checksum or datasum don't match.
"""
if 'CHECKSUM' in self._header:
self._checksum = self._header['CHECKSUM']
self._checksum_valid = self.verify_checksum()
if not self._checksum_valid:
warnings.warn(
'Checksum verification failed for HDU {}.\n'.format(
(self.name, self.ver)), AstropyUserWarning)
if 'DATASUM' in self._header:
self._datasum = self._header['DATASUM']
self._datasum_valid = self.verify_datasum()
if not self._datasum_valid:
warnings.warn(
'Datasum verification failed for HDU {}.\n'.format(
(self.name, self.ver)), AstropyUserWarning)
def _get_timestamp(self):
"""
Return the current timestamp in ISO 8601 format, with microseconds
stripped off.
Ex.: 2007-05-30T19:05:11
"""
return datetime.datetime.now().isoformat()[:19]
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if not self._data_loaded:
# This is the case where the data has not been read from the file
# yet. We find the data in the file, read it, and calculate the
# datasum.
if self.size > 0:
raw_data = self._get_raw_data(self._data_size, 'ubyte',
self._data_offset)
return self._compute_checksum(raw_data)
else:
return 0
elif self.data is not None:
return self._compute_checksum(self.data.view('ubyte'))
else:
return 0
def _calculate_checksum(self, datasum, checksum_keyword='CHECKSUM'):
"""
Calculate the value of the ``CHECKSUM`` card in the HDU.
"""
old_checksum = self._header[checksum_keyword]
self._header[checksum_keyword] = '0' * 16
# Convert the header to bytes.
s = self._header.tostring().encode('utf8')
# Calculate the checksum of the Header and data.
cs = self._compute_checksum(np.frombuffer(s, dtype='ubyte'), datasum)
# Encode the checksum into a string.
s = self._char_encode(~cs)
# Return the header card value.
self._header[checksum_keyword] = old_checksum
return s
def _compute_checksum(self, data, sum32=0):
"""
Compute the ones-complement checksum of a sequence of bytes.
Parameters
----------
data
a memory region to checksum
sum32
incremental checksum value from another region
Returns
-------
ones complement checksum
"""
blocklen = 2880
sum32 = np.uint32(sum32)
for i in range(0, len(data), blocklen):
            length = min(blocklen, len(data) - i)  # last block may be short
sum32 = self._compute_hdu_checksum(data[i:i + length], sum32)
return sum32
def _compute_hdu_checksum(self, data, sum32=0):
"""
Translated from FITS Checksum Proposal by Seaman, Pence, and Rots.
Use uint32 literals as a hedge against type promotion to int64.
        This code should only be called with blocks of 2880 bytes.
        Longer blocks result in non-standard checksums with carry overflow.
        Historically, this code *was* called with larger blocks and for that
        reason still needs to be for backward compatibility.
"""
u8 = np.uint32(8)
u16 = np.uint32(16)
uFFFF = np.uint32(0xFFFF)
if data.nbytes % 2:
last = data[-1]
data = data[:-1]
else:
last = np.uint32(0)
data = data.view('>u2')
hi = sum32 >> u16
lo = sum32 & uFFFF
hi += np.add.reduce(data[0::2], dtype=np.uint64)
lo += np.add.reduce(data[1::2], dtype=np.uint64)
if (data.nbytes // 2) % 2:
lo += last << u8
else:
hi += last << u8
hicarry = hi >> u16
locarry = lo >> u16
while hicarry or locarry:
hi = (hi & uFFFF) + locarry
lo = (lo & uFFFF) + hicarry
hicarry = hi >> u16
locarry = lo >> u16
return (hi << u16) + lo
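    # Illustrative sketch only (not used by this class): for a single
    # 2880-byte block the same ones'-complement sum can be computed with
    # plain Python integers by adding the big-endian 32-bit words and then
    # folding the carries back in:
    #
    #     s = 0
    #     for i in range(0, len(block), 4):
    #         s += int.from_bytes(block[i:i + 4], 'big')
    #     while s >> 32:
    #         s = (s & 0xFFFFFFFF) + (s >> 32)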
# _MASK and _EXCLUDE used for encoding the checksum value into a character
# string.
_MASK = [0xFF000000,
0x00FF0000,
0x0000FF00,
0x000000FF]
_EXCLUDE = [0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60]
def _encode_byte(self, byte):
"""
Encode a single byte.
"""
quotient = byte // 4 + ord('0')
remainder = byte % 4
ch = np.array(
[(quotient + remainder), quotient, quotient, quotient],
dtype='int32')
check = True
while check:
check = False
for x in self._EXCLUDE:
for j in [0, 2]:
if ch[j] == x or ch[j + 1] == x:
ch[j] += 1
ch[j + 1] -= 1
check = True
return ch
def _char_encode(self, value):
"""
Encodes the checksum ``value`` using the algorithm described
in SPR section A.7.2 and returns it as a 16 character string.
Parameters
----------
value
a checksum
Returns
-------
ascii encoded checksum
"""
value = np.uint32(value)
asc = np.zeros((16,), dtype='byte')
ascii = np.zeros((16,), dtype='byte')
for i in range(4):
byte = (value & self._MASK[i]) >> ((3 - i) * 8)
ch = self._encode_byte(byte)
for j in range(4):
asc[4 * j + i] = ch[j]
for i in range(16):
ascii[i] = asc[(i + 15) % 16]
return decode_ascii(ascii.tobytes())
class ExtensionHDU(_ValidHDU):
"""
An extension HDU class.
This class is the base class for the `TableHDU`, `ImageHDU`, and
`BinTableHDU` classes.
"""
_extension = ''
@classmethod
def match_header(cls, header):
"""
This class should never be instantiated directly. Either a standard
extension HDU type should be used for a specific extension, or
NonstandardExtHDU should be used.
"""
raise NotImplementedError
def writeto(self, name, output_verify='exception', overwrite=False,
checksum=False):
"""
        Works similarly to the normal writeto(), but prepends a default
        `PrimaryHDU` as required by extension HDUs (which cannot stand on
their own).
"""
from .hdulist import HDUList
from .image import PrimaryHDU
hdulist = HDUList([PrimaryHDU(), self])
hdulist.writeto(name, output_verify, overwrite=overwrite,
checksum=checksum)
def _verify(self, option='warn'):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
naxis = self._header.get('NAXIS', 0)
self.req_cards('PCOUNT', naxis + 3, lambda v: (_is_int(v) and v >= 0),
0, option, errs)
self.req_cards('GCOUNT', naxis + 4, lambda v: (_is_int(v) and v == 1),
1, option, errs)
return errs
# For backwards compatibility, though this needs to be deprecated
# TODO: Mark this as deprecated
_ExtensionHDU = ExtensionHDU
class NonstandardExtHDU(ExtensionHDU):
"""
A Non-standard Extension HDU class.
This class is used for an Extension HDU when the ``XTENSION``
`Card` has a non-standard value. In this case, Astropy can figure
out how big the data is but not what it is. The data for this HDU
is read from the file as a byte stream that begins at the first
byte after the header ``END`` card and continues until the
beginning of the next header or the end of the file.
"""
_standard = False
@classmethod
def match_header(cls, header):
"""
Matches any extension HDU that is not one of the standard extension HDU
types.
"""
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
# A3DTABLE is not really considered a 'standard' extension, as it was
# sort of the prototype for BINTABLE; however, since our BINTABLE
# implementation handles A3DTABLE HDUs it is listed here.
standard_xtensions = ('IMAGE', 'TABLE', 'BINTABLE', 'A3DTABLE')
# The check that xtension is not one of the standard types should be
# redundant.
return (card.keyword == 'XTENSION' and
xtension not in standard_xtensions)
def _summary(self):
axes = tuple(self.data.shape)
return (self.name, self.ver, 'NonstandardExtHDU', len(self._header), axes)
@lazyproperty
def data(self):
"""
Return the file data.
"""
return self._get_raw_data(self.size, 'ubyte', self._data_offset)
# TODO: Mark this as deprecated
_NonstandardExtHDU = NonstandardExtHDU
|
c5387e5fa75f436c7fe2841eb597ddebada774466689baba6d083ad0ede2bd77 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import sys
import warnings
import pytest
import numpy as np
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.table import Table
from astropy.units import UnitsWarning, Unit, UnrecognizedUnit
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.io.fits.column import ColumnAttribute, Delayed, NUMPY2FITS
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from . import FitsTestCase
def comparefloats(a, b):
"""
    Compare two float scalars or arrays and see if they are consistent.
    Consistency is determined by ensuring the difference is less than the
expected amount. Return True if consistent, False if any differences.
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == 'float32' or bb.dtype.name == 'float32':
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.
if np.any(mask0):
if diff[mask0].max() != 0.:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
def comparerecords(a, b):
"""
    Compare two record arrays.
    Does this field by field, using approximation testing for float columns
    (complex not yet handled).
    Column names are not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == 'S':
fielda = decode_ascii(fielda)
if fieldb.dtype.char == 'S':
fieldb = decode_ascii(fieldb)
if (not isinstance(fielda, type(fieldb)) and not
isinstance(fieldb, type(fielda))):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f'field {i} type differs')
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f'field {i} differs')
return False
elif (isinstance(fielda, fits.column._VLF) or
isinstance(fieldb, fits.column._VLF)):
for row in range(len(fielda)):
if np.any(fielda[row] != fieldb[row]):
print(f'fielda[{row}]: {fielda[row]}')
print(f'fieldb[{row}]: {fieldb[row]}')
print(f'field {i} differs in row {row}')
# A mismatch in any row means the records are not consistent.
return False
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f'field {i} differs')
return False
return True
def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [k for k, v in fits.Column.__dict__.items()
if isinstance(v, ColumnAttribute)]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr)
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize a
new HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr['FILENAME'] = 'labq01i3q_rawtag.fits'
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert thdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# open some existing FITS files:
tt = fits.open(self.data('tb.fits'))
fd = fits.open(self.data('test0.fits'))
# create some local arrays
a1 = chararray.array(['abc', 'def', 'xx'])
r1 = np.array([11., 12., 13.], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name='abc', format='3A', array=a1)
c2 = fits.Column(name='def', format='E', array=r1)
a3 = np.array([3, 4, 5], dtype='i2')
c3 = fits.Column(name='xyz', format='I', array=a3)
a4 = np.array([1, 2, 3], dtype='i2')
c4 = fits.Column(name='t1', format='I', array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype='c8')
c5 = fits.Column(name='t2', format='C', array=a5)
# Note that the X (bit) format requires a two-dimensional array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name='t3', format='X', array=a6)
a7 = np.array([101, 102, 103], dtype='i4')
c7 = fits.Column(name='t4', format='J', array=a7)
a8 = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]], dtype=np.uint8)
c8 = fits.Column(name='t5', format='11X', array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
# another way to create a table is by using an existing table's
# information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view('bool')).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field('abc')) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp('tableout1.fits'), overwrite=True)
with fits.open(self.temp('tableout1.fits')) as f2:
temp = f2[1].data.field(7)
assert (temp[0] == [True, True, False, True, False, True,
True, True, False, False, True]).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp('tableout2.fits'), 'append')
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data('tb.fits'))
assert t[1].header['tform1'] == '1J'
info = {'name': ['c1', 'c2', 'c3', 'c4'],
'format': ['1J', '3A', '1E', '1L'],
'unit': ['', '', '', ''],
'null': [-2147483647, '', '', ''],
'bscale': ['', '', 3, ''],
'bzero': ['', '', 0.4, ''],
'disp': ['I11', 'A3', 'G15.7', 'L6'],
'start': ['', '', '', ''],
'dim': ['', '', '', ''],
'coord_inc': ['', '', '', ''],
'coord_type': ['', '', '', ''],
'coord_unit': ['', '', '', ''],
'coord_ref_point': ['', '', '', ''],
'coord_ref_value': ['', '', '', ''],
'time_ref_pos': ['', '', '', '']}
assert t[1].columns.info(output=False) == info
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field('c4')[0] = 1
t[1].data._scale_back()
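# After scaling back, the logical column is stored as FITS 'T'/'F'
# bytes; 84 is ord('T').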
assert str(np.rec.recarray.field(t[1].data, 'c4')) == '[84 84]'
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data('ascii.fits'))
ra1 = np.rec.array([
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345)], names='c1, c2')
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names='c1, c2')
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array([
(10.123000144958496, 37),
(15.609999656677246, 17),
(345.0, 345)
], names='c1, c2')
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(['abcd', 'def'])
r1 = np.array([11., 12.])
c1 = fits.Column(name='abc', format='A3', start=19, array=a1)
c2 = fits.Column(name='def', format='E', start=3, array=r1)
c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
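# The field offsets below are the zero-based equivalents of the 1-based
# TBCOL start values (start=19 -> 18, start=3 -> 2); 't1', which has no
# explicit start, is placed directly after 'abc'.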
assert (dict(hdu.data.dtype.fields) ==
{'abc': (np.dtype('|S3'), 18),
'def': (np.dtype('|S15'), 2),
't1': (np.dtype('|S10'), 21)})
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11., 12.])
c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,
bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp('toto.fits'), overwrite=True)
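# On disk the column stores (value - bzero) / bscale, so the second
# element is (12 - 0.6) / 2.3 ~= 4.9565..., written in the ASCII-table
# 'D' exponent notation checked below.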
with open(self.temp('toto.fits')) as f:
assert '4.95652173913043548D+00' in f.read()
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
# Test Integer precision according to width
c1 = fits.Column(name='t2', format='I2', array=[91, 92, 93])
c2 = fits.Column(name='t4', format='I5', array=[91, 92, 93])
c3 = fits.Column(name='t8', format='I10', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c1, c2, c3])
assert c1.array.dtype == np.int16
assert c2.array.dtype == np.int32
assert c3.array.dtype == np.int64
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype='uint8')
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
hduL = fits.open(self.temp('testendian.fits'))
rfiHDU = hduL['RFI']
data = rfiHDU.data
channelsOut = data.field('Channels')[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1., 2., 3., 4.]
a1 = np.array(a, dtype='<f8')
a2 = np.array(a, dtype='>f8')
col1 = fits.Column(name='a', format='D', array=a1)
col2 = fits.Column(name='b', format='D', array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data['a'] == a1).all()
assert (tbhdu.data['b'] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
with fits.open(self.temp('testendian.fits')) as hdul:
assert (hdul[1].data['a'] == a2).all()
assert (hdul[1].data['b'] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float32,a10',
names='order,name,mag,Sp')
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'S20', 'float32', 'S10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'U20', 'float32', 'U10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Ensure that changing one data element through one reference
# affects all of the other references to the same array.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == 'Serius'
assert hdu.data[1][1] == 'Canopys'
assert (hdu.data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == 'A1V'
assert hdu.data[1][3] == 'F0Ib'
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert (hdul[1].data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == 'Serius'
assert hdul[1].data[1][1] == 'Canopys'
assert (hdul[1].data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdul[1].data[0][3] == 'A1V'
assert hdul[1].data[1][3] == 'F0Ib'
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
assert comparerecords(hdu.data, tmp)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data('tb.fits')) as h:
data = h[1].data
new_data = np.array([(3, 'qwe', 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith('FITS_rec(')
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert (t1[1].columns._arrays[1] is t1[1].columns.columns[1].array)
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp('newtable.fits'))
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 19, '8R x 5C', '[10A, J, 10A, 5E, L]',
'')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True),
('NGC5', 412, '', z, False),
('NGC6', 434, '', z, True),
('NGC7', 408, '', z, False),
('NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
# ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu1.data, array)
def test_adding_a_column_inplace(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
tbhdu.columns.add_col(c5)
assert tbhdu.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu.data, array)
def test_adding_a_column_to_file(self):
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
col = fits.Column(name='a', array=np.array([1, 2]), format='K')
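# 'K' is the FITS format code for a 64-bit signed integer column
# (hence the 'i8' dtype in the expected record array below).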
tbhdu.columns.add_col(col)
assert tbhdu.columns.names == ['target', 'V_mag', 'a']
array = np.rec.array(
[('NGC1001', 11.1, 1),
('NGC1002', 12.3, 2),
('NGC1003', 15.2, 0)],
formats='a20,f4,i8')
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_removing_a_column_inplace(self):
# Tests removing columns from a table in place.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
tbhdu.columns.del_col('flag')
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z),
('NGC2', 334, '', z),
('NGC3', 308, '', z),
('NCG4', 317, '', z)],
formats='a10,u4,a10,5f4')
assert comparerecords(tbhdu.data, array)
tbhdu.columns.del_col('counts')
tbhdu.columns.del_col('notes')
assert tbhdu.columns.names == ['target', 'spectrum']
array = np.rec.array(
[('NGC1', z),
('NGC2', z),
('NGC3', z),
('NCG4', z)],
formats='a10,5f4')
assert comparerecords(tbhdu.data, array)
def test_removing_a_column_from_file(self):
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
tbhdu.columns.del_col('V_mag')
assert tbhdu.columns.names == ['target']
array = np.rec.array(
[('NGC1001', ),
('NGC1002', ),
('NGC1003', )],
formats='a20')
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target1', format='10A', array=names)
c2 = fits.Column(name='counts1', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes1', format='A10')
c4 = fits.Column(name='spectrum1', format='5E')
c5 = fits.Column(name='flag1', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp('newtable.fits'))
# Verify that all of the references to the data point to the same
# ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 30, '4R x 10C',
'[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]', '')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
assert (hdu.columns.names ==
['target', 'counts', 'notes', 'spectrum', 'flag', 'target1',
'counts1', 'notes1', 'spectrum1', 'flag1'])
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {'a': 2, 'b': 'b', 'c': 2.3}
data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'S1'), ('c', float)])
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
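# Setting Column.null should propagate to the corresponding TNULLn
# header keywords when the HDU is written out, as checked below.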
b.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
header = hdul[1].header
assert header['TNULL1'] == 2
assert header['TNULL2'] == 'b'
assert header['TNULL3'] == 2.3
def test_multidimension_table_from_numpy_rec_columns(self):
"""Regression test for https://github.com/astropy/astropy/issues/5280
and https://github.com/astropy/astropy/issues/5287
Multidimensional tables can now be written with the correct TDIM.
Author: Stephen Bailey.
"""
dtype = [
(str('x'), (str, 5)), # 1D column of 5-character strings
(str('y'), (str, 3), (4,)), # 2D column; each row is four 3-char strings
]
data = np.zeros(2, dtype=dtype)
data['x'] = ['abcde', 'xyz']
data['y'][0] = ['A', 'BC', 'DEF', '123']
data['y'][1] = ['X', 'YZ', 'PQR', '999']
table = Table(data)
# Test convenience functions io.fits.writeto / getdata
fits.writeto(self.temp('test.fits'), data)
dx = fits.getdata(self.temp('test.fits'))
assert data['x'].dtype == dx['x'].dtype
assert data['y'].dtype == dx['y'].dtype
assert np.all(data['x'] == dx['x']), 'x: {} != {}'.format(data['x'], dx['x'])
assert np.all(data['y'] == dx['y']), 'y: {} != {}'.format(data['y'], dx['y'])
# Test fits.BinTableHDU(data) and avoid convenience functions
hdu0 = fits.PrimaryHDU()
hdu1 = fits.BinTableHDU(data)
hx = fits.HDUList([hdu0, hdu1])
hx.writeto(self.temp('test2.fits'))
fx = fits.open(self.temp('test2.fits'))
dx = fx[1].data
fx.close()
assert data['x'].dtype == dx['x'].dtype
assert data['y'].dtype == dx['y'].dtype
assert np.all(data['x'] == dx['x']), 'x: {} != {}'.format(data['x'], dx['x'])
assert np.all(data['y'] == dx['y']), 'y: {} != {}'.format(data['y'], dx['y'])
# Test Table write and read
table.write(self.temp('test3.fits'))
tx = Table.read(self.temp('test3.fits'), character_as_bytes=False)
assert table['x'].dtype == tx['x'].dtype
assert table['y'].dtype == tx['y'].dtype
assert np.all(table['x'] == tx['x']), 'x: {} != {}'.format(table['x'], tx['x'])
assert np.all(table['y'] == tx['y']), 'y: {} != {}'.format(table['y'], tx['y'])
def test_mask_array(self):
t = fits.open(self.data('table.fits'))
tbdata = t[1].data
mask = tbdata.field('V_mag') > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp('newtable.fits'))
hdul = fits.open(self.temp('newtable.fits'))
# Match against a regex rather than a specific string, since the exact
# float formatting can vary.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
row = t1[1].data[2]
assert row['counts'] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ''
assert (c == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
row['counts'] = 310
assert row['counts'] == 310
row[1] = 315
assert row['counts'] == 315
assert row[1:4]['counts'] == 315
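# 'flag' (field index 4) lies outside the [1:4) slice, so it is not
# accessible from the sliced row.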
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
assert row['counts'] == 300
row[1:4][0] = 400
assert row[1:4]['counts'] == 400
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]['counts'] == 500
row[1:4:2][0] = 300
assert row[1:4]['counts'] == 300
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
assert row[1:4].field(0) == 300
assert row[1:4].field('counts') == 300
pytest.raises(KeyError, row[1:4].field, 'flag')
row[1:4].setfield('counts', 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, 'flag', False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
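# A FITS_record row exposes its 5 fields through len(); slice bounds
# beyond the last field are clamped rather than raising.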
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name='target', format='10A')
c2 = fits.Column(name='counts', format='J', unit='DN')
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L')
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
# Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
# Test assigning data to a table's row using a tuple
tbhdu.data[2] = ('NGC1', 312, 'A Note',
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True)
# Test assigning data to a table's row using a list
tbhdu.data[3] = ['JIM1', '33', 'A Note',
np.array([1., 2., 3., 4., 5.], dtype=np.float32),
True]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == 'NGC1'
assert tbhdu.columns.columns[2].array[0] == ''
assert (tbhdu.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[0] == True # noqa
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == 'JIM1'
assert tbhdu.columns.columns[2].array[3] == 'A Note'
assert (tbhdu.columns.columns[3].array[3] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[3] == True # noqa
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1., 2., 3., 4., 5.], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
# Assign the 4 rows from the second table to rows 5 through 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.data._coldefs._arrays[0]))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns.columns[0].array))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns._arrays[0]))
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == 'NGC1'
assert tbhdu2.columns.columns[2].array[0] == ''
assert (tbhdu2.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[0] == True # noqa
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == 'NGC5'
assert tbhdu2.columns.columns[2].array[4] == ''
assert (tbhdu2.columns.columns[3].array[4] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[4] == False # noqa
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ''
assert tbhdu2.columns.columns[2].array[8] == ''
assert (tbhdu2.columns.columns[3].array[8] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[8] == False # noqa
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert (id(coldefs.columns[0].array) !=
id(tbhdu.columns.columns[0].array))
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.data._coldefs._arrays[0]))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns.columns[0].array))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns._arrays[0]))
# Ensure that changing one data element through one reference
# affects all of the other references to the same array.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = hducls(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = hducls(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert 'EXTVER' not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header['EXTVER'] == 2
# Passing ver to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header['EXTVER'] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header['EXTVER'] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name='spam', format='E', array=[42.])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name='flag', format='2L',
array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (tbhdu1.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu1.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (tbhdu.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
def test_fits_rec_column_access(self):
tbdata = fits.getdata(self.data('table.fits'))
assert (tbdata.V_mag == tbdata.field('V_mag')).all()
assert (tbdata.V_mag == tbdata['V_mag']).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data('tb.fits'))
for col in ('c1', 'c2', 'c3', 'c4'):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data('ascii.fits'))
for col in ('a', 'b'):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(name='x', format='PI()',
array=np.array([[45, 56], [11, 12, 13]],
dtype=np.object_))
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data['x']) == type(hdu.data.x) # noqa
assert (hdu.data['x'][0] == hdu.data.x[0]).all()
assert (hdu.data['x'][1] == hdu.data.x[1]).all()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data('zerowidth.fits'))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert 'ORBPARM' in tbhdu.columns.names
# The ORBPARM column should not be in the data, though the data should
# be readable
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.writeto(self.temp('newtable.fits'))
hdul.close()
hdul = fits.open(self.temp('newtable.fits'))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert 'ORBPARM' in tbhdu.columns.names
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.close()
def test_string_column_padding(self):
a = ['img1', 'img2', 'img3a', 'p']
s = 'img1\x00\x00\x00\x00\x00\x00' \
'img2\x00\x00\x00\x00\x00\x00' \
'img3a\x00\x00\x00\x00\x00' \
'p\x00\x00\x00\x00\x00\x00\x00\x00\x00'
acol = fits.Column(name='MEMNAME', format='A10',
array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode('raw-unicode-escape') == s
ahdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
del hdul
ahdu = fits.TableHDU.from_columns([acol])
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert (hdul[1].data.tobytes().decode('raw-unicode-escape') ==
s.replace('\x00', ' '))
assert (hdul[1].data['MEMNAME'] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[([0, 1, 2, 3, 4, 5], 'row1' * 2),
([6, 7, 8, 9, 0, 1], 'row2' * 2),
([2, 3, 4, 5, 6, 7], 'row3' * 2)], formats='6i4,a8')
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits'), mode='update') as hdul:
# Modify the TDIM fields to my own specification
hdul[1].header['TDIM1'] = '(2,3)'
hdul[1].header['TDIM2'] = '(4,2)'
with fits.open(self.temp('newtable.fits')) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (c1 == np.array([[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]]])).all()
assert (c2 == np.array([['row1', 'row1'],
['row2', 'row2'],
['row3', 'row3']])).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', 4)])
data['x'] = 1, 2, 3
data['s'] = 'ok'
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', (4, 3))])
data['x'] = 1, 2, 3
data['s'] = 'ok'
del t
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4, 3)
def test_oned_array_single_element(self):
# a table with rows that are 1d arrays of a single value
data = np.array([(1, ), (2, )], dtype=([('x', 'i4', (1, ))]))
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp('onedtable.fits'))
with fits.open(self.temp('onedtable.fits')) as hdul:
thdu = hdul[1]
c = thdu.data.field(0)
assert c.shape == (2, 1)
assert thdu.header['TDIM1'] == '(1)'
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b'abcd', b'efgh'],
[b'ijkl', b'mnop'],
[b'qrst', b'uvwx']]
arr = np.array([(data,), (data,), (data,), (data,), (data,)],
dtype=[('S', '(3, 2)S4')])
tbhdu1 = fits.BinTableHDU(data=arr)
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(4,2,3)'
assert tbhdu2.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
assert np.all(tbhdu2.data['S'] == tbhdu.data['S'])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
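# A short illustration of the truncating-TDIM situation this test exercises
# (not executed; values taken from the assertions further down): with
#
#     TFORM1 = '12A'      # 12 characters allocated per row
#     TDIM1  = '(2,2,2)'  # but only 2*2*2 = 8 of them are addressed
#
# the 4 trailing characters of each field are undefined fill and are simply
# ignored when the column is read.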
arr1 = [[b'ab', b'cd'], [b'ef', b'gh'], [b'ij', b'kl']]
arr2 = [1, 2, 3, 4, 5]
arr = np.array([(arr1, arr2), (arr1, arr2)],
dtype=[('a', '(3, 2)S2'), ('b', '5i8')])
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp('test.fits'), 'wb') as f:
f.write(raw_bytes.replace(b'(2,2,3)', b'(2,2,2)'))
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(2,2,2)'
assert tbhdu2.header['TFORM1'] == '12A'
for row in tbhdu2.data:
assert np.all(row['a'] == [['ab', 'cd'], ['ef', 'gh']])
assert np.all(row['b'] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [['abc', 'def', 'ghi'],
['jkl', 'mno', 'pqr'],
['stu', 'vwx', 'yz ']]
recarr = np.rec.array([(data,), (data,)], formats=['(3,3)S3'])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
with fits.open(self.temp('test.fits')) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(['a', 'b'], dtype='|S1')
arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2')
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name='str', format='1A', array=arra),
fits.Column(name='strarray', format='4A', dim='(2,2)',
array=arrb),
fits.Column(name='intarray', format='4I', dim='(2, 2)',
array=arrc)
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data['str'].encode('ascii') == arra).all()
assert (h[1].data['strarray'].encode('ascii') == arrb).all()
assert (h[1].data['intarray'] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [fits.Column(name='a', format='20I', dim='(2,2)',
array=arra),
fits.Column(name='b', format='4I', dim='(2,2)',
array=arrb)]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM1'] == '20I'
assert h[1].header['TFORM2'] == '4I'
assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)'
assert (h[1].data['a'] == arra).all()
assert (h[1].data['b'] == arrb).all()
assert h[1].data.itemsize == 48 # 16-bits times 24
# If the dimensions imply more elements than the repeat count in the
# format specifier, an error should be raised
pytest.raises(VerifyError, fits.Column, name='a', format='2I',
dim='(2,2)', array=arra)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data('tdim.fits')) as hdulist:
assert hdulist[1].data['V_mag'].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
targets = data.field('target')
s = data[:]
assert (s.field('target') == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field('target') == targets[:n]).all()
s = data[n:]
assert (s.field('target') == targets[n:]).all()
s = data[::2]
assert (s.field('target') == targets[::2]).all()
s = data[::-1]
assert (s.field('target') == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data('table.fits')) as hdu:
data = hdu[1].data
data['V_mag'] = 0
assert np.all(data['V_mag'] == 0)
data['V_mag'] = 1
assert np.all(data['V_mag'] == 1)
for container in (list, tuple, np.array):
data['V_mag'] = container([1, 2, 3])
assert np.array_equal(data['V_mag'], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data('table.fits'), mode='readonly') as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
@pytest.mark.parametrize('tablename', ['table.fits', 'tb.fits'])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
with fits.open(self.data(tablename)) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
_assert_attr_col(new_tbhdu, hdul[1])
def test_dump_load_array_colums(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array([('a', [1, 2, 3, 4], 0.1),
('b', [5, 6, 7, 8], 0.2)],
formats='a1,4i4,f8')
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
science to this; the table that's produced simply uses sensible guesses
for that format. Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name='c0', format='L', array=a0)
# Format X is currently not supported by the dump/load format guessing
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name='c2', format='B', array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name='c3', format='I', array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name='c4', format='J', array=a4)
a5 = np.array(['a', 'abc', 'ab'])
c5 = fits.Column(name='c5', format='A3', array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name='c6', format='D', array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j],
dtype=np.complex128)
c7 = fits.Column(name='c7', format='M', array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
c8 = fits.Column(name='c8', format='PJ()', array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp('data.txt')
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name='names', format='I', array=[1])
c2 = fits.Column(name='formats', format='I', array=[2])
c3 = fits.Column(name='other', format='I', array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ['names', 'formats', 'other']
assert t.data.formats == ['I'] * 3
assert (t.data['names'] == [1]).all()
assert (t.data['formats'] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats='|b1,|b1')
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp('table.fits'))
data = fits.getdata(self.temp('table.fits'), ext=1)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[('a', '?')])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data['a'] == arr['a']).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column('F1', 'L', array=[True, False])
c2 = fits.Column('F2', 'L', array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp('table.fits'))
with fits.open(self.temp('table.fits'), mode='update') as hdul:
hdul[1].data['F1'][1] = True
hdul[1].data['F2'][0] = True
with fits.open(self.temp('table.fits')) as hdul:
assert (hdul[1].data['F1'] == [True, True]).all()
assert (hdul[1].data['F2'] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column('F1', 'A3', null='---',
array=np.array(['1.0', '2.0', '---', '3.0']),
ascii=True)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp('test.fits'))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp('test.fits'), mode='update') as h:
h[1].header['TFORM1'] = 'E3'
del h[1].header['TNULL1']
with fits.open(self.temp('test.fits')) as h:
pytest.raises(ValueError, lambda: h[1].data['F1'])
try:
with fits.open(self.temp('test.fits')) as h:
h[1].data['F1']
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data")
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = ' '
c1 = fits.Column('F1', format='I8', null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp('ascii_null.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null.fits'), mode='r+') as h:
nulled = h.read().replace('2 ', ' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null.fits'), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = 'NaN'
c2 = fits.Column('F1', format='F12.8', null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp('ascii_null2.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null2.fits'), mode='r+') as h:
nulled = h.read().replace('3.00000000', ' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null2.fits'), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('tb.fits')) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['NAXIS'] == 2
assert h[1].header['NAXIS1'] == 12
assert h[1].header['NAXIS2'] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
def test_unncessary_table_load(self):
"""Test unnecessary parsing and processing of FITS tables when writing
directly from one FITS file to a new file without first reading the
data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unnecessary processing of the data.
"""
with fits.open(self.data('table.fits')) as h:
h[1].writeto(self.temp('test.fits'))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert 'data' not in h[1].__dict__
with fits.open(self.data('table.fits')) as h1:
with fits.open(self.temp('test.fits')) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data('table.fits'))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data('tb.fits')) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata['c1'] == tbdata2['c1'])
assert np.all(tbdata['c2'] == tbdata2['c2'])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(tbdata['c3'].astype(np.float32) ==
tbdata2['c3'].astype(np.float32))
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata['c4'], 'T', 'F') ==
tbdata2['c4'])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match='Field 2 has a repeat count of 0'):
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[
('a', 'i8'),
('b', 'S64'),
('c', ('i4', (3, 2)))])
hdu = fits.BinTableHDU(array)
assert hdu.header['NAXIS1'] == 96
assert hdu.header['NAXIS2'] == 0
assert hdu.header['TDIM3'] == '(2,3)'
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data('random_groups.fits'))['DATA']
col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)',
format='1152E')
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[1].data['TEST'] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data('tb.fits'))
data2 = fits.getdata(self.data('tb.fits'))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1):] = data2
mask = merged['c1'] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data('tb.fits')))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([('abc',)], dtype=[('a', 'S3')])
fits.writeto(self.temp('test.fits'), data)
with fits.open(self.temp('test.fits'), mode='update') as hdul:
hdul[1].data['a'][0] = 'XYZ'
assert hdul[1].data['a'][0] == 'XYZ'
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].data['a'][0] == 'XYZ'
# Test update but with a non-trivial TDIMn
data = np.array([([['abc', 'def', 'geh'],
['ijk', 'lmn', 'opq']],)],
dtype=[('a', ('S3', (2, 3)))])
fits.writeto(self.temp('test2.fits'), data)
expected = [['abc', 'def', 'geh'],
['ijk', 'XYZ', 'opq']]
with fits.open(self.temp('test2.fits'), mode='update') as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data['a'][0, 1, 1] = 'XYZ'
assert np.all(hdul[1].data['a'][0] == expected)
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
assert np.all(hdul[1].data['a'][0] == expected)
@pytest.mark.skipif('not HAVE_OBJGRAPH')
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting('FITS_rec'):
readfile(self.data('memtest.fits'))
@pytest.mark.skipif('not HAVE_OBJGRAPH')
@pytest.mark.slow
def test_reference_leak2(self, tmpdir):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_core import TestCore
from .test_connect import TestMultipleHDU
t1 = TestCore()
t1.setup()
try:
with _refcounting('FITS_rec'):
t1.test_add_del_columns2()
finally:
t1.teardown()
del t1
t2 = self.__class__()
for test_name in ['test_recarray_to_bintablehdu',
'test_numpy_ndarray_to_bintablehdu',
'test_new_table_from_recarray',
'test_new_fitsrec']:
t2.setup()
try:
with _refcounting('FITS_rec'):
getattr(t2, test_name)()
finally:
t2.teardown()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting('FITS_rec'):
t3.test_read(tmpdir)
finally:
t3.teardown_class()
del t3
def test_dump_overwrite(self):
with fits.open(self.data('table.fits')) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
msg = (r"File .* already exists\. File .* already exists\. File "
r".* already exists\. If you mean to replace the "
r"file\(s\) then use the argument 'overwrite=True'\.")
with pytest.raises(OSError, match=msg):
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name='A', format='1J', bzero=2**31, array=data)
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
# Test that the file wrote out correctly
with fits.open(self.temp('test.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == data)
# Test updating the unsigned int data
hdu.data['A'][0] = 99
hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(name='c1', array=np.array([1], dtype='>i2'),
format='1I', bscale=1, bzero=32768)
S = fits.HDUList([fits.PrimaryHDU(),
fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data['c1'][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data['c1'] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data['c1'][0] = 10
assert X[1].data['c1'][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data['c1'][0] == 10
def test_ascii_inttypes(self):
"""
Test correct integer dtypes according to ASCII table field widths.
Regression for https://github.com/astropy/astropy/issues/9899
"""
i08 = np.array([2**3, 2**23, -2**22, 10, 2**23], dtype='i4')
i10 = np.array([2**8, 2**31-1, -2**29, 30, 2**31-1], dtype='i8')
i20 = np.array([2**16, 2**63-1, -2**63, 40, 2**63-1], dtype='i8')
i02 = np.array([2**8, 2**13, -2**9, 50, 2**13], dtype='i2')
t0 = Table([i08, i08*2, i10, i20, i02])
t1 = Table.read(self.data('ascii_i4-i20.fits'))
assert t1.dtype == t0.dtype
assert comparerecords(t1, t0)
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
assert len(objgraph.by_type(type_)) <= refcount, \
"More {0!r} objects still in memory than before.".format(type_)
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[[0] * 1571] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as toto:
q = toto[1].data.field('QUAL_SPE')
assert (q[0][4:8] ==
np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith('J(1571)')
for code in ('PJ()', 'QJ()'):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name='TESTVLF', format=format_code, array=arr)
col2 = fits.Column(name='TESTSCA', format='J', array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data['TESTSCA']) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data['TESTVLF']) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data['TESTVLF'][0] == arr[0]).all()
assert (tb_hdu.data['TESTVLF'][9] == arr[9]).all()
assert (tb_hdu.data['TESTVLF'][10] == ([0] * 10)).all()
assert (tb_hdu.data['TESTVLF'][-1] == ([0] * 10)).all()
for code in ('PJ()', 'QJ()'):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array([np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])]
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array([np.array(['a', 'b', 'c']), np.array(['d', 'e']),
np.array(['f'])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ['a', 'ab', 'abc']
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[np.arange(1572)] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
data = fits.getdata(self.temp('toto.fits'))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data['QUAL_SPE'], col.array):
assert (row_a == row_b).all()
for code in ('PJ()', 'QJ()'):
test(code)
@pytest.mark.skipif(not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == 'win32',
reason='https://github.com/numpy/numpy/issues/20699')
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column('test', format='J', array=np.arange(255))
c1 = fits.Column('A', format='PJ', array=arr1)
c2 = fits.Column('B', format='PJ', array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp('test.fits'), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM2'] == 'PJ(255)'
assert h[2].header['TFORM2'] == 'PJ(255)'
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp('test.fits')) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp('test3.fits'))
with fits.open(self.temp('test3.fits')) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp('test2.fits'))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp('test2.fits'), mode='append') as new_hdul:
for _ in range(2):
with fits.open(self.temp('test.fits')) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp('test2.fits')) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
def test_vla_with_gap(self):
hdul = fits.open(self.data('theap-gap.fits'))
data = hdul[1].data
assert data.shape == (500,)
assert data['i'][497] == 497
assert np.array_equal(data['arr'][497], [0, 1, 2, 3, 4])
hdul.close()
def test_tolist(self):
col = fits.Column(
name='var', format='PI()',
array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_))
hdu = fits.BinTableHDU.from_columns([col])
assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]]
assert hdu.data['var'].tolist() == [[1, 2, 3], [11, 12]]
def test_tolist_from_file(self):
filename = self.data('variable_length_table.fits')
with fits.open(filename) as hdul:
hdu = hdul[1]
assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]
assert hdu.data['var'].tolist() == [[45, 56], [11, 12, 13]]
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized as
long as they are unambiguous ("unambiguous" is a loose notion here, since
Numpy is case-insensitive when parsing format codes, but their "proper"
case is lower-case, so we accept that). In practice, any key in the
NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
c = fits.Column('TEST', np.dtype(recformat))
assert c.format == fitsformat
c = fits.Column('TEST', recformat)
assert c.format == fitsformat
c = fits.Column('TEST', fitsformat)
assert c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column('TEST', 'I4')
assert c.format == 'I4'
assert c.format.format == 'I'
assert c.format.width == 4
c = fits.Column('TEST', 'F15.8')
assert c.format == 'F15.8'
assert c.format.format == 'F'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'E15.8')
assert c.format.format == 'E'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'D15.8')
assert c.format.format == 'D'
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column('TEST', 'F10.0')
assert c.format.format == 'F'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'E10.0')
assert c.format.format == 'E'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'D10.0')
assert c.format.format == 'D'
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column('TEST', 'I')
assert c.format == 'I'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I', ascii=True)
assert c.format == 'I10'
assert c.format.recformat == 'i4'
# With specified widths, integer precision should be set appropriately
c = fits.Column('TEST', 'I4', ascii=True)
assert c.format == 'I4'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I9', ascii=True)
assert c.format == 'I9'
assert c.format.recformat == 'i4'
c = fits.Column('TEST', 'I12', ascii=True)
assert c.format == 'I12'
assert c.format.recformat == 'i8'
c = fits.Column('TEST', 'E')
assert c.format == 'E'
assert c.format.recformat == 'f4'
c = fits.Column('TEST', 'E', ascii=True)
assert c.format == 'E15.7'
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column('TEST', 'F')
assert c.format == 'F16.7'
c = fits.Column('TEST', 'D')
assert c.format == 'D'
assert c.format.recformat == 'f8'
c = fits.Column('TEST', 'D', ascii=True)
assert c.format == 'D25.17'
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column('TEST', 'F5.0', array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['TFORM1'] == 'F5.0'
assert hdul[1].data['TEST'].dtype == np.dtype('float64')
assert np.all(hdul[1].data['TEST'] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, 'TEST')
assert raw.tobytes() == b' 1. 2. 3.'
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
def test_new_coldefs_with_invalid_seqence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[('A', '<u4', (2,)), ('B', '>u2')])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs['A'].bzero
assert 2**15 == col_defs['B'].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
# Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match=r'Field 2 has a repeat count '
r'of 0 in its format code'):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name='a', format='D')
b = fits.Column(name='b', format='D')
cols = fits.ColDefs([a, b])
assert cols['a'] == cols[0]
assert cols['b'] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns([
fits.Column('a', format='D'),
fits.Column('b', format='D')])
b = table.columns['b']
table.columns.del_col('b')
assert table.data.dtype.names == ('a',)
b.name = 'HELLO'
assert b.name == 'HELLO'
assert 'TTYPE2' not in table.header
assert table.header['TTYPE1'] == 'a'
assert table.columns.names == ['a']
with pytest.raises(KeyError):
table.columns['b']
# Make sure updates to the remaining column still work
table.columns.change_name('a', 'GOODBYE')
with pytest.raises(KeyError):
table.columns['a']
assert table.columns['GOODBYE'].name == 'GOODBYE'
assert table.data.dtype.names == ('GOODBYE',)
assert table.columns.names == ['GOODBYE']
assert table.data.columns.names == ['GOODBYE']
table.columns['GOODBYE'].name = 'foo'
with pytest.raises(KeyError):
table.columns['GOODBYE']
assert table.columns['foo'].name == 'foo'
assert table.data.dtype.names == ('foo',)
assert table.columns.names == ['foo']
assert table.data.columns.names == ['foo']
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column('xcol', format='5X', array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column('pcol', format='PJ', array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column('qcol', format='QJ', array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
those that typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format='I', array=[1, 2, 3, 4, 5])
assert 'Column name must be a string able to fit' in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('col', format=0, null='Nan', disp=1, coord_type=1,
coord_unit=2, coord_inc='1', time_ref_pos=1,
coord_ref_point='1', coord_ref_value='1')
err_msgs = ['keyword arguments to Column were invalid',
'TFORM', 'TNULL', 'TDISP', 'TCTYP', 'TCUNI', 'TCRPX',
'TCRVL', 'TCDLT', 'TRPOS']
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with the highest
priority, i.e. that the order in which error messages are displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='B', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='I', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='I', start='-56', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(err.value)
@pytest.mark.parametrize('keys',
[{'TFORM': 'Z', 'TDISP': 'E'},
{'TFORM': '2', 'TDISP': '2E'},
{'TFORM': 3, 'TDISP': 6.3},
{'TFORM': float, 'TDISP': np.float64},
{'TFORM': '', 'TDISP': 'E.5'}])
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column('col', format=keys['TFORM'], disp=keys['TDISP'])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name='a', array=x, format='E')
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header['TTYPE1']
hdu.columns[0].name = 'b'
def test_table_to_hdu():
from astropy.table import Table
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
table.meta['foo'] = 'bar'
with pytest.warns(UnitsWarning, match="'not-a-unit' did not parse as"
" fits unit") as w:
hdu = fits.BinTableHDU(table, header=fits.Header({'TEST': 1}))
assert len(w) == 1
for name in 'abc':
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert hdu.header['FOO'] == 'bar'
assert hdu.header['TEST'] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)],
dtype=[('x', float), ('y', int)]).view(fits.FITS_rec)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__; so test elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmpdir):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
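# Keyword <-> Column attribute correspondence referenced below (as listed in
# the column verification test earlier in this module):
#     TCTYP -> coord_type, TCUNI -> coord_unit, TCRPX -> coord_ref_point,
#     TCRVL -> coord_ref_value, TCDLT -> coord_inc, TRPOS -> time_ref_pos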
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr['TUNIT1'] = 'pixel'
hdr['TUNIT2'] = 'm'
hdr['TUNIT3'] = 'm'
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr['TCTYP2'] = 'RA---TAN'
hdr['TCTYP3'] = 'ANGLE'
hdr['TCRVL2'] = -999.0
hdr['TCRVL3'] = -999.0
hdr['TCRPX2'] = 1.0
hdr['TCRPX3'] = 1.0
hdr['TALEN2'] = 16384
hdr['TALEN3'] = 1024
hdr['TCUNI2'] = 'angstrom'
hdr['TCUNI3'] = 'deg'
# Other non-relevant keywords
hdr['RA'] = 1.5
hdr['DEC'] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith(
"The following keywords are now recognized as special")
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == 's'
assert hdu.columns[1].unit == 'pixel'
assert hdu.columns[2].unit is None
assert hdu.header['TUNIT1'] == 's'
assert hdu.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert 'TCTYP1' not in hdu.header
assert hdu.header['TCTYP2'] == 'RA---TAN'
assert hdu.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu.header['RA'] == 1.5
assert hdu.header['DEC'] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
# cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmpdir.join('test.fits').strpath
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with warnings.catch_warnings(record=True) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == 's'
assert hdu2.columns[1].unit == 'pixel'
assert hdu2.columns[2].unit is None
assert hdu2.header['TUNIT1'] == 's'
assert hdu2.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == 'RA---TAN'
assert hdu2.columns[2].coord_type == 'ANGLE'
assert 'TCTYP1' not in hdu2.header
assert hdu2.header['TCTYP2'] == 'RA---TAN'
assert hdu2.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu2.header['RA'] == 1.5
assert hdu2.header['DEC'] == 3.0
def test_empty_table(tmpdir):
ofile = str(tmpdir.join('emptytable.fits'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
ofile = str(tmpdir.join('emptytable.fits.gz'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
def test_a3dtable(tmpdir):
testfile = str(tmpdir.join('test.fits'))
hdu = fits.BinTableHDU.from_columns([
fits.Column(name='FOO', format='J', array=np.arange(10))
])
hdu.header['XTENSION'] = 'A3DTABLE'
hdu.writeto(testfile, output_verify='ignore')
with fits.open(testfile) as hdul:
assert hdul[1].header['XTENSION'] == 'A3DTABLE'
with pytest.warns(AstropyUserWarning) as w:
hdul.verify('fix')
assert str(w[0].message) == 'Verification reported errors:'
assert str(w[2].message).endswith(
'Converted the XTENSION keyword to BINTABLE.')
assert hdul[1].header['XTENSION'] == 'BINTABLE'
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header['FOO'] = None
hdu.header.cards['FOO']._value = np.nan
testfile = tmp_path / 'test.fits'
hdu.writeto(testfile, output_verify='ignore')
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / 'invalid_unit.fits'
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = '1 / (MeV sr s)'
unit = Unit(invalid_unit)
t = Table({'a': [1, 2, 3]})
t.write(path)
with fits.open(path, mode='update') as hdul:
hdul[1].header['TUNIT1'] = invalid_unit
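    # unit_parse_strict controls how the unrecognized TUNIT1 string is handled
    # when the table is read back; the three accepted modes ('warn', 'silent',
    # 'raise') are each exercised below.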
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t['a'].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict='silent')
assert isinstance(t['a'].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict='raise')
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict='warn')

# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import collections
import warnings
from io import StringIO, BytesIO
import pytest
import numpy as np
from astropy.io import fits
from astropy.io.fits.verify import VerifyWarning, VerifyError
from astropy.utils.exceptions import AstropyUserWarning
from . import FitsTestCase
from astropy.io.fits.card import _pad
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import encode_ascii
def test_shallow_copy():
"""Make sure that operations on a shallow copy do not alter the original.
#4990."""
original_header = fits.Header([('a', 1), ('b', 1)])
copied_header = copy.copy(original_header)
# Modifying the original dict should not alter the copy
original_header['c'] = 100
assert 'c' not in copied_header
# and changing the copy should not change the original.
copied_header['a'] = 0
assert original_header['a'] == 1
def test_init_with_header():
"""Make sure that creating a Header from another Header makes a copy if
copy is True."""
original_header = fits.Header([('a', 10)])
new_header = fits.Header(original_header, copy=True)
original_header['a'] = 20
assert new_header['a'] == 10
new_header['a'] = 0
assert original_header['a'] == 20
def test_init_with_dict():
dict1 = {'a': 11, 'b': 12, 'c': 13, 'd': 14, 'e': 15}
h1 = fits.Header(dict1)
for i in dict1:
assert dict1[i] == h1[i]
def test_init_with_ordereddict():
    # Create a list of tuples, each pairing a letter with its index in the alphabet
list1 = [(i, j) for j, i in enumerate('abcdefghijklmnopqrstuvwxyz')]
# Create an ordered dictionary and a header from this dictionary
dict1 = collections.OrderedDict(list1)
h1 = fits.Header(dict1)
    # Check that the order of the initial list is preserved
assert all(h1[val] == list1[i][1] for i, val in enumerate(h1))
class TestHeaderFunctions(FitsTestCase):
"""Test Header and Card objects."""
def test_rename_keyword(self):
"""Test renaming keyword with rename_keyword."""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
header.rename_keyword('A', 'B')
assert 'A' not in header
assert 'B' in header
assert header[0] == 'B'
assert header['B'] == 'B'
assert header.comments['B'] == 'C'
@pytest.mark.parametrize('key', ['A', 'a'])
def test_indexing_case(self, key):
"""Check that indexing is case insensitive"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
assert key in header
assert header[key] == 'B'
assert header.get(key) == 'B'
assert header.index(key) == 0
assert header.comments[key] == 'C'
assert header.count(key) == 1
header.remove(key, ignore_missing=False)
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
assert '' == c.keyword
def test_card_from_bytes(self):
"""
Test loading a Card from a `bytes` object (assuming latin-1 encoding).
"""
c = fits.Card.fromstring(b"ABC = 'abc'")
assert c.keyword == 'ABC'
assert c.value == 'abc'
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card('abc', '<8 ch')
assert str(c) == _pad("ABC = '<8 ch '")
c = fits.Card('nullstr', '')
assert str(c) == _pad("NULLSTR = ''")
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == _pad("ABC = T")
c = fits.Card.fromstring('ABC = F')
assert c.value is False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card('long_int', -467374636747637647347374734737437)
assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437")
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card('floatnum', -467374636747637647347374734737437.)
if (str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and
str(c) != _pad("FLOATNUM= -4.6737463674763E+032")):
assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32")
def test_complex_value_card(self):
"""Test Card constructor with complex value"""
c = fits.Card('abc',
(1.2345377437887837487e88 + 6324767364763746367e-33j))
f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)")
f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
if str(c) != f1 and str(c) != f2:
assert str(c) == f3
def test_card_image_constructed_too_long(self):
"""Test that over-long cards truncate the comment"""
# card image constructed from key/value/comment is too long
# (non-string value)
c = fits.Card('abc', 9, 'abcde' * 20)
with pytest.warns(fits.verify.VerifyWarning):
assert (str(c) ==
"ABC = 9 "
"/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab")
c = fits.Card('abc', 'a' * 68, 'abcdefg')
with pytest.warns(fits.verify.VerifyWarning):
assert str(c) == f"ABC = '{'a' * 68}'"
def test_constructor_filter_illegal_data_structures(self):
"""Test that Card constructor raises exceptions on bad arguments"""
pytest.raises(ValueError, fits.Card, ('abc',), {'value': (2, 3)})
pytest.raises(ValueError, fits.Card, 'key', [], 'comment')
def test_keyword_too_long(self):
"""Test that long Card keywords are allowed, but with a warning"""
pytest.warns(UserWarning, fits.Card, 'abcdefghi', 'long')
def test_illegal_characters_in_key(self):
"""
Test that Card constructor allows illegal characters in the keyword,
but creates a HIERARCH card.
"""
# This test used to check that a ValueError was raised, because a
# keyword like 'abc+' was simply not allowed. Now it should create a
# HIERARCH card.
with pytest.warns(AstropyUserWarning) as w:
c = fits.Card('abc+', 9)
assert len(w) == 1
assert c.image == _pad('HIERARCH abc+ = 9')
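        # A minimal illustrative sketch (not asserted here): the same
        # auto-upgrade also happens for keywords that are merely too long,
        # e.g. fits.Card('long_keyword', 1) warns and produces a HIERARCH card.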
def test_add_history(self):
header = fits.Header([('A', 'B', 'C'), ('HISTORY', 1),
('HISTORY', 2), ('HISTORY', 3), ('', '', ''),
('', '', '')])
header.add_history(4)
# One of the blanks should get used, so the length shouldn't change
assert len(header) == 6
assert header.cards[4].value == 4
assert header['HISTORY'] == [1, 2, 3, 4]
assert repr(header['HISTORY']) == '1\n2\n3\n4'
header.add_history(0, after='A')
assert len(header) == 6
assert header.cards[1].value == 0
assert header['HISTORY'] == [0, 1, 2, 3, 4]
def test_add_blank(self):
header = fits.Header([('A', 'B', 'C'), ('', 1), ('', 2), ('', 3),
('', '', ''), ('', '', '')])
header.add_blank(4)
# This time a new blank should be added, and the existing blanks don't
# get used... (though this is really kinda sketchy--there's a
# distinction between truly blank cards, and cards with blank keywords
        # that isn't currently made in the code)
assert len(header) == 7
assert header.cards[6].value == 4
assert header[''] == [1, 2, 3, '', '', 4]
assert repr(header['']) == '1\n2\n3\n\n\n4'
header.add_blank(0, after='A')
assert len(header) == 8
assert header.cards[1].value == 0
assert header[''] == [0, 1, 2, 3, '', '', 4]
header[''] = 5
header[' '] = 6
assert header[''] == [0, 1, 2, 3, '', '', 4, 5, 6]
assert header[' '] == [0, 1, 2, 3, '', '', 4, 5, 6]
def test_update(self):
class FakeHeader(list):
def keys(self):
return [l[0] for l in self]
def __getitem__(self, key):
return next(l[1:] for l in self if l[0] == key)
header = fits.Header()
header.update({'FOO': ('BAR', 'BAZ')})
header.update(FakeHeader([('A', 1), ('B', 2, 'comment')]))
assert set(header.keys()) == {'FOO', 'A', 'B'}
assert header.comments['B'] == 'comment'
# test that comments are preserved
tmphdr = fits.Header()
tmphdr['HELLO'] = (1, 'this is a comment')
header.update(tmphdr)
assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO'}
assert header.comments['HELLO'] == 'this is a comment'
header.update(NAXIS1=100, NAXIS2=100)
assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO', 'NAXIS1', 'NAXIS2'}
assert set(header.values()) == {'BAR', 1, 2, 100, 100}
def test_update_comment(self):
hdul = fits.open(self.data('arange.fits'))
hdul[0].header.update({'FOO': ('BAR', 'BAZ')})
assert hdul[0].header['FOO'] == 'BAR'
assert hdul[0].header.comments['FOO'] == 'BAZ'
with pytest.raises(ValueError):
hdul[0].header.update({'FOO2': ('BAR', 'BAZ', 'EXTRA')})
hdul.writeto(self.temp('test.fits'))
hdul.close()
hdul = fits.open(self.temp('test.fits'), mode='update')
hdul[0].header.comments['FOO'] = 'QUX'
hdul.close()
hdul = fits.open(self.temp('test.fits'))
assert hdul[0].header.comments['FOO'] == 'QUX'
hdul[0].header.add_comment(0, after='FOO')
assert str(hdul[0].header.cards[-1]).strip() == 'COMMENT 0'
hdul.close()
def test_commentary_cards(self):
# commentary cards
val = "A commentary card's value has no quotes around it."
c = fits.Card("HISTORY", val)
assert str(c) == _pad('HISTORY ' + val)
val = "A commentary card has no comment."
c = fits.Card("COMMENT", val, "comment")
assert str(c) == _pad('COMMENT ' + val)
def test_commentary_card_created_by_fromstring(self):
# commentary card created by fromstring()
c = fits.Card.fromstring(
"COMMENT card has no comments. "
"/ text after slash is still part of the value.")
assert (c.value == 'card has no comments. '
'/ text after slash is still part of the value.')
assert c.comment == ''
def test_commentary_card_will_not_parse_numerical_value(self):
# commentary card will not parse the numerical value
c = fits.Card.fromstring("HISTORY (1, 2)")
assert str(c) == _pad("HISTORY (1, 2)")
def test_equal_sign_after_column8(self):
# equal sign after column 8 of a commentary card will be part of the
# string value
c = fits.Card.fromstring("HISTORY = (1, 2)")
assert str(c) == _pad("HISTORY = (1, 2)")
def test_blank_keyword(self):
c = fits.Card('', ' / EXPOSURE INFORMATION')
assert str(c) == _pad(' / EXPOSURE INFORMATION')
c = fits.Card.fromstring(str(c))
assert c.keyword == ''
assert c.value == ' / EXPOSURE INFORMATION'
def test_specify_undefined_value(self):
# this is how to specify an undefined value
c = fits.Card("undef", fits.card.UNDEFINED)
assert str(c) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
# complex number using string input
c = fits.Card.fromstring('ABC = (8, 9)')
assert str(c) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self, capsys):
# fixable non-standard FITS card will keep the original format
c = fits.Card.fromstring('abc = + 2.1 e + 12')
assert c.value == 2100000000000.0
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert str(c) == _pad("ABC = +2.1E+12")
def test_fixable_non_fsc(self):
        # fixable non-FSC: if the card is not parsable, its value will be
        # assumed to be a string and everything after the first slash will be
        # the comment
c = fits.Card.fromstring(
"no_quote= this card's value has no quotes "
"/ let's also try the comment")
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(c) == "NO_QUOTE= 'this card''s value has no quotes' "
"/ let's also try the comment ")
def test_undefined_value_using_string_input(self):
# undefined value using string input
c = fits.Card.fromstring('ABC = ')
assert str(c) == _pad("ABC =")
def test_undefined_keys_values(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['UNDEF'] = None
assert list(header.values()) == ['BAR', None]
assert list(header.items()) == [('FOO', 'BAR'), ('UNDEF', None)]
def test_mislocated_equal_sign(self, capsys):
# test mislocated "=" sign
c = fits.Card.fromstring('XYZ= 100')
assert c.keyword == 'XYZ'
assert c.value == 100
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert str(c) == _pad("XYZ = 100")
def test_equal_only_up_to_column_10(self, capsys):
# the test of "=" location is only up to column 10
# This test used to check if Astropy rewrote this card to a new format,
# something like "HISTO = '= (1, 2)". But since ticket #109 if the
# format is completely wrong we don't make any assumptions and the card
# should be left alone
c = fits.Card.fromstring("HISTO = (1, 2)")
with pytest.warns(AstropyUserWarning,
match=r'header keyword is invalid'):
assert str(c) == _pad("HISTO = (1, 2)")
# Likewise this card should just be left in its original form and
# we shouldn't guess how to parse it or rewrite it.
c = fits.Card.fromstring(" HISTORY (1, 2)")
with pytest.warns(AstropyUserWarning,
match=r'header keyword is invalid'):
assert str(c) == _pad(" HISTORY (1, 2)")
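        # Informal summary of the rule above: an '= ' is only honored as a
        # value indicator when it occurs by about column 10; any later, or
        # with a malformed keyword, the card string is preserved verbatim.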
def test_verify_invalid_equal_sign(self):
# verification
c = fits.Card.fromstring('ABC= a6')
with pytest.warns(AstropyUserWarning) as w:
c.verify()
err_text1 = ("Card 'ABC' is not FITS standard (equal sign not at "
"column 8)")
err_text2 = ("Card 'ABC' is not FITS standard (invalid value "
"string: 'a6'")
assert len(w) == 4
assert err_text1 in str(w[1].message)
assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
fix_text = "Fixed 'ABC' card to meet the FITS standard."
c = fits.Card.fromstring('ABC= a6')
with pytest.warns(AstropyUserWarning, match=fix_text) as w:
c.verify('fix')
assert len(w) == 4
assert str(c) == _pad("ABC = 'a6 '")
def test_long_string_value(self):
# test long string value
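        # The expected string below follows the FITS long-string convention:
        # the value is broken into pieces that each end in '&', the pieces
        # after the first are carried on CONTINUE cards, and the over-long
        # comment is wrapped onto CONTINUE cards in the same way once the
        # value ends.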
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment ")
def test_long_string_value_with_multiple_long_words(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11298
"""
c = fits.Card('WHATEVER',
'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_'
'03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY'
'_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml')
assert (str(c) ==
"WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'"
"CONTINUE '.h5 &' "
"CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'"
"CONTINUE 'xml' ")
def test_long_unicode_string(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/1
So long as a unicode string can be converted to ASCII it should have no
different behavior in this regard from a byte string.
"""
h1 = fits.Header()
h1['TEST'] = 'abcdefg' * 30
h2 = fits.Header()
h2['TEST'] = 'abcdefg' * 30
assert str(h1) == str(h2)
def test_long_string_repr(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193
Ensure that the __repr__() for cards represented with CONTINUE cards is
split across multiple lines (broken at each *physical* card).
"""
header = fits.Header()
header['TEST1'] = ('Regular value', 'Regular comment')
header['TEST2'] = ('long string value ' * 10, 'long comment ' * 10)
header['TEST3'] = ('Regular value', 'Regular comment')
assert (repr(header).splitlines() ==
[str(fits.Card('TEST1', 'Regular value', 'Regular comment')),
"TEST2 = 'long string value long string value long string value long string &' ",
"CONTINUE 'value long string value long string value long string value long &' ",
"CONTINUE 'string value long string value long string value &' ",
"CONTINUE '&' / long comment long comment long comment long comment long ",
"CONTINUE '&' / comment long comment long comment long comment long comment ",
"CONTINUE '' / long comment ",
str(fits.Card('TEST3', 'Regular value', 'Regular comment'))])
def test_blank_keyword_long_value(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194
Test that a blank keyword ('') can be assigned a too-long value that is
continued across multiple cards with blank keywords, just like COMMENT
and HISTORY cards.
"""
value = 'long string value ' * 10
header = fits.Header()
header[''] = value
assert len(header) == 3
assert ' '.join(header['']) == value.rstrip()
# Ensure that this works like other commentary keywords
header['COMMENT'] = value
header['HISTORY'] = value
assert header['COMMENT'] == header['HISTORY']
assert header['COMMENT'] == header['']
def test_long_string_from_file(self):
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
hdu = fits.PrimaryHDU()
hdu.header.append(c)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
c = hdul[0].header.cards['abc']
hdul.close()
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment ")
def test_word_in_long_string_too_long(self):
# if a word in a long string is too long, it will be cut in the middle
c = fits.Card('abc', 'longstringvalue' * 10, 'longcomment' * 10)
assert (str(c) ==
"ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
"CONTINUE 'elongstringvalue&' "
"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
"CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment ")
def test_long_string_value_via_fromstring(self, capsys):
# long string value via fromstring() method
c = fits.Card.fromstring(
_pad("abc = 'longstring''s testing & ' "
"/ comments in line 1") +
_pad("continue 'continue with long string but without the "
"ampersand at the end' /") +
_pad("continue 'continue must have string value (with quotes)' "
"/ comments with ''. "))
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(c) ==
"ABC = 'longstring''s testing continue with long string but without the &' "
"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
"CONTINUE '' / comments in line 1 comments with ''. ")
def test_continue_card_with_equals_in_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
"""
c = fits.Card.fromstring(
_pad("EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'") +
_pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'") +
_pad("CONTINUE '&' / pysyn expression"))
assert c.keyword == 'EXPR'
assert (c.value ==
'/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits '
'* 5.87359e-12 * MWAvg(Av=0.12)')
assert c.comment == 'pysyn expression'
def test_final_continue_card_lacks_ampersand(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
h = fits.Header()
h['SVALUE'] = 'A' * 69
assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'")
def test_final_continue_card_ampersand_removal_on_long_comments(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
c = fits.Card('TEST', 'long value' * 10, 'long comment &' * 10)
assert (str(c) ==
"TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' "
"CONTINUE 'valuelong valuelong valuelong value&' "
"CONTINUE '&' / long comment &long comment &long comment &long comment &long "
"CONTINUE '&' / comment &long comment &long comment &long comment &long comment "
"CONTINUE '' / &long comment & ")
def test_hierarch_card_creation(self):
# Test automatic upgrade to hierarch card
with pytest.warns(AstropyUserWarning, match='HIERARCH card will be created') as w:
c = fits.Card('ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert len(w) == 1
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
# Test manual creation of hierarch card
c = fits.Card('hierarch abcdefghi', 10)
assert str(c) == _pad("HIERARCH abcdefghi = 10")
c = fits.Card('HIERARCH ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
def test_hierarch_with_abbrev_value_indicator(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/5
"""
c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_hierarch_not_warn(self):
"""Check that compressed image headers do not issue HIERARCH warnings.
"""
filename = fits.util.get_testdata_filepath('compressed_image.fits')
with fits.open(filename) as hdul:
header = hdul[1].header
with warnings.catch_warnings(record=True) as warning_list:
header["HIERARCH LONG KEYWORD"] = 42
assert len(warning_list) == 0
assert header["LONG KEYWORD"] == 42
assert header["HIERARCH LONG KEYWORD"] == 42
# Check that it still warns if we do not use HIERARCH
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header["LONG KEYWORD2"] = 1
assert header["LONG KEYWORD2"] == 1
def test_hierarch_keyword_whitespace(self):
"""
Regression test for
https://github.com/spacetelescope/PyFITS/issues/6
Make sure any leading or trailing whitespace around HIERARCH
keywords is stripped from the actual keyword value.
"""
c = fits.Card.fromstring(
"HIERARCH key.META_4 = 'calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
# Test also with creation via the Card constructor
c = fits.Card('HIERARCH key.META_4', 'calFileVersion')
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_verify_mixed_case_hierarch(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/7
        Ensures that HIERARCH keywords with lower-case characters and other
normally invalid keyword characters are not considered invalid.
"""
c = fits.Card('HIERARCH WeirdCard.~!@#_^$%&', 'The value', 'a comment')
# This should not raise any exceptions
c.verify('exception')
assert c.keyword == 'WeirdCard.~!@#_^$%&'
assert c.value == 'The value'
assert c.comment == 'a comment'
# Test also the specific case from the original bug report
header = fits.Header([
('simple', True),
('BITPIX', 8),
('NAXIS', 0),
('EXTEND', True, 'May contain datasets'),
('HIERARCH key.META_0', 'detRow')
])
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
header2 = hdul[0].header
assert (str(header.cards[header.index('key.META_0')]) ==
str(header2.cards[header2.index('key.META_0')]))
def test_missing_keyword(self):
"""Test that accessing a non-existent keyword raises a KeyError."""
header = fits.Header()
# De-referencing header through the inline function should behave
# identically to accessing it in the pytest.raises context below.
pytest.raises(KeyError, lambda k: header[k], 'NAXIS')
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."):
header['NAXIS']
def test_hierarch_card_lookup(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
assert 'abcdefghi' in header
assert header['abcdefghi'] == 10
# This used to be assert_false, but per ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
# should be treated case-insensitively when performing lookups
assert 'ABCDEFGHI' in header
def test_hierarch_card_delete(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
del header['hierarch abcdefghi']
def test_hierarch_card_insert_delete(self):
header = fits.Header()
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header['abcdefghi'] = 10
header['abcdefgh'] = 10
header['abcdefg'] = 10
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header.insert(2, ('abcdefghij', 10))
del header['abcdefghij']
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header.insert(2, ('abcdefghij', 10))
del header[2]
assert list(header.keys())[2] == 'abcdefg'.upper()
def test_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({'HIERARCH BLAH BLAH': 'TESTA'})
assert len(w) == 0
assert 'BLAH BLAH' in header
assert header['BLAH BLAH'] == 'TESTA'
header.update({'HIERARCH BLAH BLAH': 'TESTB'})
assert len(w) == 0
            assert header['BLAH BLAH'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLAH BLAH': 'TESTC'})
assert len(w) == 1
assert len(header) == 1
            assert header['BLAH BLAH'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH blah blah': 'TESTD'})
assert len(w) == 1
assert len(header) == 1
            assert header['blah blah'] == 'TESTD'
header.update({'blah blah': 'TESTE'})
assert len(w) == 2
assert len(header) == 1
            assert header['blah blah'] == 'TESTE'
# Create a HIERARCH card > 8 characters without explicitly stating
# 'HIERARCH'
header.update({'BLAH BLAH BLAH': 'TESTA'})
assert len(w) == 3
assert msg in str(w[0].message)
header.update({'HIERARCH BLAH BLAH BLAH': 'TESTB'})
assert len(w) == 3
            assert header['BLAH BLAH BLAH'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLAH BLAH BLAH': 'TESTC'})
assert len(w) == 4
            assert header['BLAH BLAH BLAH'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH blah blah blah': 'TESTD'})
assert len(w) == 4
            assert header['blah blah blah'] == 'TESTD'
header.update({'blah blah blah': 'TESTE'})
assert len(w) == 5
            assert header['blah blah blah'] == 'TESTE'
def test_short_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards,
specifically where the keyword is fewer than 8 characters, but contains
invalid characters such that it can only be created as a HIERARCH card.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({'HIERARCH BLA BLA': 'TESTA'})
assert len(w) == 0
assert 'BLA BLA' in header
assert header['BLA BLA'] == 'TESTA'
header.update({'HIERARCH BLA BLA': 'TESTB'})
assert len(w) == 0
            assert header['BLA BLA'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLA BLA': 'TESTC'})
assert len(w) == 1
            assert header['BLA BLA'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH bla bla': 'TESTD'})
assert len(w) == 1
assert len(header) == 1
            assert header['bla bla'] == 'TESTD'
header.update({'bla bla': 'TESTE'})
assert len(w) == 2
assert len(header) == 1
            assert header['bla bla'] == 'TESTE'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
# Create a HIERARCH card containing invalid characters without
# explicitly stating 'HIERARCH'
header.update({'BLA BLA': 'TESTA'})
print([x.category for x in w])
assert len(w) == 1
assert msg in str(w[0].message)
header.update({'HIERARCH BLA BLA': 'TESTB'})
assert len(w) == 1
            assert header['BLA BLA'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLA BLA': 'TESTC'})
assert len(w) == 2
            assert header['BLA BLA'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH bla bla': 'TESTD'})
assert len(w) == 2
assert len(header) == 1
            assert header['bla bla'] == 'TESTD'
header.update({'bla bla': 'TESTE'})
assert len(w) == 3
assert len(header) == 1
            assert header['bla bla'] == 'TESTE'
def test_header_setitem_invalid(self):
header = fits.Header()
def test():
header['FOO'] = ('bar', 'baz', 'qux')
pytest.raises(ValueError, test)
def test_header_setitem_1tuple(self):
header = fits.Header()
header['FOO'] = ('BAR',)
header['FOO2'] = (None,)
assert header['FOO'] == 'BAR'
assert header['FOO2'] is None
assert header[0] == 'BAR'
assert header.comments[0] == ''
assert header.comments['FOO'] == ''
def test_header_setitem_2tuple(self):
header = fits.Header()
header['FOO'] = ('BAR', 'BAZ')
header['FOO2'] = (None, None)
assert header['FOO'] == 'BAR'
assert header['FOO2'] is None
assert header[0] == 'BAR'
assert header.comments[0] == 'BAZ'
assert header.comments['FOO'] == 'BAZ'
assert header.comments['FOO2'] == ''
def test_header_set_value_to_none(self):
"""
Setting the value of a card to None should simply give that card an
undefined value. Undefined value should map to None.
"""
header = fits.Header()
header['FOO'] = 'BAR'
assert header['FOO'] == 'BAR'
header['FOO'] = None
assert header['FOO'] is None
# Create a header that contains an undefined value and a defined
# value.
hstr = "UNDEF = \nDEFINED = 42"
header = fits.Header.fromstring(hstr, sep='\n')
# Explicitly add a card with an UNDEFINED value
c = fits.Card("UNDEF2", fits.card.UNDEFINED)
header.extend([c])
# And now assign an undefined value to the header through setitem
header['UNDEF3'] = fits.card.UNDEFINED
# Tuple assignment
header.append(("UNDEF5", None, "Undefined value"), end=True)
header.append("UNDEF6")
assert header['DEFINED'] == 42
assert header['UNDEF'] is None
assert header['UNDEF2'] is None
assert header['UNDEF3'] is None
assert header['UNDEF5'] is None
assert header['UNDEF6'] is None
# Assign an undefined value to a new card
header['UNDEF4'] = None
# Overwrite an existing value with None
header["DEFINED"] = None
# All headers now should be undefined
for c in header.cards:
assert c.value == fits.card.UNDEFINED
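        # As the assertions above show, assigning None and assigning
        # fits.card.UNDEFINED are interchangeable: item access on the Header
        # returns None, while the underlying Card.value compares equal to
        # fits.card.UNDEFINED.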
def test_set_comment_only(self):
header = fits.Header([('A', 'B', 'C')])
header.set('A', comment='D')
assert header['A'] == 'B'
assert header.comments['A'] == 'D'
def test_header_iter(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header) == ['A', 'C']
def test_header_slice(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
newheader = header[1:]
assert len(newheader) == 2
assert 'A' not in newheader
assert 'C' in newheader
assert 'E' in newheader
newheader = header[::-1]
assert len(newheader) == 3
assert newheader[0] == 'F'
assert newheader[1] == 'D'
assert newheader[2] == 'B'
newheader = header[::2]
assert len(newheader) == 2
assert 'A' in newheader
assert 'C' not in newheader
assert 'E' in newheader
def test_header_slice_assignment(self):
"""
Assigning to a slice should just assign new values to the cards
included in the slice.
"""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header[1:] = 1
assert header[1] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header[1:] = 'GH'
assert header[1] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header[1:] = ['H', 'I']
assert header[1] == 'H'
assert header[2] == 'I'
def test_header_slice_delete(self):
"""Test deleting a slice of cards from the header."""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
del header[1:]
assert len(header) == 1
assert header[0] == 'B'
del header[:]
assert len(header) == 0
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
newheader = header['AB*']
assert len(newheader) == 2
assert newheader[0] == 0
assert newheader[1] == 2
def test_wildcard_with_hyphen(self):
"""
Regression test for issue where wildcards did not work on keywords
containing hyphens.
"""
header = fits.Header([('DATE', 1), ('DATE-OBS', 2), ('DATE-FOO', 3)])
assert len(header['DATE*']) == 3
assert len(header['DATE?*']) == 2
assert len(header['DATE-*']) == 2
def test_wildcard_slice_assignment(self):
"""Test assigning to a header slice selected via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header['AB*'] = 1
assert header[0] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header['AB*'] = 'GH'
assert header[0] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header['AB*'] = ['H', 'I']
assert header[0] == 'H'
assert header[2] == 'I'
def test_wildcard_slice_deletion(self):
"""Test deleting cards from a header that match a wildcard pattern."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
del header['AB*']
assert len(header) == 1
assert header[0] == 1
def test_header_history(self):
header = fits.Header([('ABC', 0), ('HISTORY', 1), ('HISTORY', 2),
('DEF', 3), ('HISTORY', 4), ('HISTORY', 5)])
assert header['HISTORY'] == [1, 2, 4, 5]
def test_header_clear(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
header.clear()
assert 'A' not in header
assert 'C' not in header
assert len(header) == 0
@pytest.mark.parametrize('fitsext', [fits.ImageHDU(), fits.CompImageHDU()])
def test_header_clear_write(self, fitsext):
hdulist = fits.HDUList([fits.PrimaryHDU(), fitsext])
hdulist[1].header['FOO'] = 'BAR'
hdulist[1].header.clear()
with pytest.raises(VerifyError) as err:
hdulist.writeto(self.temp('temp.fits'), overwrite=True)
err_msg = "'XTENSION' card does not exist."
assert err_msg in str(err.value)
def test_header_fromkeys(self):
header = fits.Header.fromkeys(['A', 'B'])
assert 'A' in header
assert header['A'] is None
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] is None
assert header.comments['B'] == ''
def test_header_fromkeys_with_value(self):
header = fits.Header.fromkeys(['A', 'B'], 'C')
assert 'A' in header
assert header['A'] == 'C'
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] == 'C'
assert header.comments['B'] == ''
def test_header_fromkeys_with_value_and_comment(self):
header = fits.Header.fromkeys(['A'], ('B', 'C'))
assert 'A' in header
assert header['A'] == 'B'
assert header.comments['A'] == 'C'
def test_header_fromkeys_with_duplicates(self):
header = fits.Header.fromkeys(['A', 'B', 'A'], 'C')
assert 'A' in header
assert ('A', 0) in header
assert ('A', 1) in header
assert ('A', 2) not in header
assert header[0] == 'C'
assert header['A'] == 'C'
assert header[('A', 0)] == 'C'
assert header[2] == 'C'
assert header[('A', 1)] == 'C'
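        # As shown above, a duplicated keyword can be addressed either by its
        # plain name (which resolves to the first matching card) or by a
        # (keyword, n) tuple selecting the n-th occurrence.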
def test_header_items(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header.items()) == [('A', 'B'), ('C', 'D')]
def test_header_iterkeys(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(header.keys(), header):
assert a == b
def test_header_itervalues(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(header.values(), ['B', 'D']):
assert a == b
def test_header_keys(self):
with fits.open(self.data('arange.fits')) as hdul:
assert (list(hdul[0].header) ==
['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'NAXIS3',
'EXTEND'])
def test_header_list_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
last = header.pop()
assert last == 'H'
assert len(header) == 3
assert list(header) == ['A', 'C', 'E']
mid = header.pop(1)
assert mid == 'D'
assert len(header) == 2
assert list(header) == ['A', 'E']
first = header.pop(0)
assert first == 'B'
assert len(header) == 1
assert list(header) == ['E']
pytest.raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
pytest.raises(TypeError, header.pop, 'A', 'B', 'C')
last = header.pop('G')
assert last == 'H'
assert len(header) == 3
assert list(header) == ['A', 'C', 'E']
mid = header.pop('C')
assert mid == 'D'
assert len(header) == 2
assert list(header) == ['A', 'E']
first = header.pop('A')
assert first == 'B'
assert len(header) == 1
assert list(header) == ['E']
default = header.pop('X', 'Y')
assert default == 'Y'
assert len(header) == 1
pytest.raises(KeyError, header.pop, 'X')
def test_popitem(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 2
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 1
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 0
pytest.raises(KeyError, header.popitem)
def test_setdefault(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.setdefault('A') == 'B'
assert header.setdefault('C') == 'D'
assert header.setdefault('E') == 'F'
assert len(header) == 3
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
assert 'G' in header
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
def test_update_from_dict(self):
"""
Test adding new cards and updating existing cards from a dict using
Header.update()
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update({'A': 'E', 'F': 'G'})
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
# Same as above but this time pass the update dict as keyword arguments
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update(A='E', F='G')
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_update_from_iterable(self):
"""
Test adding new cards and updating existing cards from an iterable of
cards and card tuples.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update([('A', 'E'), fits.Card('F', 'G')])
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_header_extend(self):
"""
Test extending a header both with and without stripping cards from the
extension header.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu2.header['MYKEY'] = ('some val', 'some comment')
hdu.header += hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Same thing, but using + instead of +=
hdu = fits.PrimaryHDU()
hdu.header = hdu.header + hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Directly append the other header in full--not usually a desirable
# operation when the header is coming from another HDU
hdu.header.extend(hdu2.header, strip=False)
assert len(hdu.header) == 11
assert list(hdu.header)[5] == 'XTENSION'
assert hdu.header[-1] == 'some val'
assert ('MYKEY', 1) in hdu.header
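        # The jump from 5 to 11 cards is due to strip=False: by default
        # extend() strips structural keywords such as XTENSION, BITPIX and
        # NAXIS from the incoming header, so only MYKEY was carried over in
        # the first two cases above.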
def test_header_extend_unique(self):
"""
Test extending the header with and without unique=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 6
assert hdu.header[-2] == 'some val'
assert hdu.header[-1] == 'some other val'
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu.header.extend(hdu2.header, unique=True)
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
def test_header_extend_unique_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added. Issue astropy/astropy#3967
"""
for commentary_card in ['', 'COMMENT', 'HISTORY']:
for is_unique in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = 'My text'
hdu.header.extend(hdu2.header, unique=is_unique)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == 'My text'
def test_header_extend_update(self):
"""
Test extending the header with and without update=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu2.header['HISTORY'] = 'history 1'
hdu2.header['HISTORY'] = 'history 2'
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 9
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) in hdu.header
assert hdu.header[('MYKEY', 1)] == 'some other val'
assert len(hdu.header['HISTORY']) == 3
assert hdu.header[-1] == 'history 2'
hdu = fits.PrimaryHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu.header.extend(hdu2.header, update=True)
assert len(hdu.header) == 7
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) not in hdu.header
assert hdu.header['MYKEY'] == 'some other val'
assert len(hdu.header['HISTORY']) == 2
assert hdu.header[-1] == 'history 2'
def test_header_extend_update_commentary(self):
"""
        Test extending header with and without update=True and commentary
cards in the header being added.
Though not quite the same as astropy/astropy#3967, update=True hits
the same if statement as that issue.
"""
for commentary_card in ['', 'COMMENT', 'HISTORY']:
for is_update in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = 'My text'
hdu.header.extend(hdu2.header, update=is_update)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == 'My text'
def test_header_extend_exact(self):
"""
Test that extending an empty header with the contents of an existing
header can exactly duplicate that header, given strip=False and
end=True.
"""
header = fits.getheader(self.data('test0.fits'))
header2 = fits.Header()
header2.extend(header, strip=False, end=True)
assert header == header2
def test_header_count(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.count('A') == 1
assert header.count('C') == 1
assert header.count('E') == 1
header['HISTORY'] = 'a'
header['HISTORY'] = 'b'
assert header.count('HISTORY') == 2
pytest.raises(KeyError, header.count, 'G')
def test_header_append_use_blanks(self):
"""
Tests that blank cards can be appended, and that future appends will
use blank cards when available (unless useblanks=False)
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
assert len(header) == 4
assert header[-1] == ''
assert header[-2] == ''
# New card should fill the first blank by default
header.append(('E', 'F'))
assert len(header) == 4
assert header[-2] == 'F'
assert header[-1] == ''
# This card should not use up a blank spot
header.append(('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[-1] == ''
assert header[-2] == 'H'
def test_header_append_keyword_only(self):
"""
Test appending a new card with just the keyword, and no value or
comment given.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.append('E')
assert len(header) == 3
assert list(header)[-1] == 'E'
assert header[-1] is None
assert header.comments['E'] == ''
# Try appending a blank--normally this can be accomplished with just
# header.append(), but header.append('') should also work (and is maybe
# a little more clear)
header.append('')
assert len(header) == 4
assert list(header)[-1] == ''
assert header[''] == ''
assert header.comments[''] == ''
def test_header_insert_use_blanks(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
# Insert a new card; should use up one of the blanks
header.insert(1, ('E', 'F'))
assert len(header) == 4
assert header[1] == 'F'
assert header[-1] == ''
assert header[-2] == 'D'
# Insert a new card without using blanks
header.insert(1, ('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[1] == 'H'
assert header[-1] == ''
def test_header_insert_before_keyword(self):
"""
Test that a keyword name or tuple can be used to insert new keywords.
Also tests the ``after`` keyword argument.
Regression test for https://github.com/spacetelescope/PyFITS/issues/12
"""
header = fits.Header([
('NAXIS1', 10), ('COMMENT', 'Comment 1'),
('COMMENT', 'Comment 3')])
header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
assert list(header.keys())[0] == 'NAXIS'
assert header[0] == 2
assert header.comments[0] == 'Number of axes'
header.insert('NAXIS1', ('NAXIS2', 20), after=True)
assert list(header.keys())[1] == 'NAXIS1'
assert list(header.keys())[2] == 'NAXIS2'
assert header[2] == 20
header.insert(('COMMENT', 1), ('COMMENT', 'Comment 2'))
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3']
header.insert(('COMMENT', 2), ('COMMENT', 'Comment 4'), after=True)
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3',
'Comment 4']
header.insert(-1, ('TEST1', True))
assert list(header.keys())[-2] == 'TEST1'
header.insert(-1, ('TEST2', True), after=True)
assert list(header.keys())[-1] == 'TEST2'
assert list(header.keys())[-3] == 'TEST1'
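        # Summarizing the behaviour exercised above: insert(key, card) places
        # the new card before `key` (or before the n-th duplicate when a
        # (keyword, n) tuple is given), after=True places it just after, and
        # integer indices, including negative ones, behave like list.insert.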
def test_remove(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# When keyword is present in the header it should be removed.
header.remove('C')
assert len(header) == 1
assert list(header) == ['A']
assert 'C' not in header
# When keyword is not present in the header and ignore_missing is
# False, KeyError should be raised
with pytest.raises(KeyError):
header.remove('F')
# When keyword is not present and ignore_missing is True, KeyError
# will be ignored
header.remove('F', ignore_missing=True)
assert len(header) == 1
# Test for removing all instances of a keyword
header = fits.Header([('A', 'B'), ('C', 'D'), ('A', 'F')])
header.remove('A', remove_all=True)
assert 'A' not in header
assert len(header) == 1
assert list(header) == ['C']
assert header[0] == 'D'
def test_header_comments(self):
header = fits.Header([('A', 'B', 'C'), ('DEF', 'G', 'H')])
assert (repr(header.comments) ==
' A C\n'
' DEF H')
def test_comment_slices_and_filters(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
s = header.comments[1:]
assert list(s) == ['H', 'K']
s = header.comments[::-1]
assert list(s) == ['K', 'H', 'D']
s = header.comments['A*']
assert list(s) == ['D', 'K']
def test_comment_slice_filter_assign(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
header.comments[1:] = 'L'
assert list(header.comments) == ['D', 'L', 'L']
assert header.cards[header.index('AB')].comment == 'D'
assert header.cards[header.index('EF')].comment == 'L'
assert header.cards[header.index('AI')].comment == 'L'
header.comments[::-1] = header.comments[:]
assert list(header.comments) == ['L', 'L', 'D']
header.comments['A*'] = ['M', 'N']
assert list(header.comments) == ['M', 'L', 'N']
def test_commentary_slicing(self):
header = fits.Header()
indices = list(range(5))
for idx in indices:
header['HISTORY'] = idx
# Just a few sample slice types; this won't get all corner cases but if
# these all work we should be in good shape
assert header['HISTORY'][1:] == indices[1:]
assert header['HISTORY'][:3] == indices[:3]
assert header['HISTORY'][:6] == indices[:6]
assert header['HISTORY'][:-2] == indices[:-2]
assert header['HISTORY'][::-1] == indices[::-1]
assert header['HISTORY'][1::-1] == indices[1::-1]
assert header['HISTORY'][1:5:2] == indices[1:5:2]
# Same tests, but copy the values first; as it turns out this is
# different from just directly doing an __eq__ as in the first set of
# assertions
header.insert(0, ('A', 'B', 'C'))
header.append(('D', 'E', 'F'), end=True)
assert list(header['HISTORY'][1:]) == indices[1:]
assert list(header['HISTORY'][:3]) == indices[:3]
assert list(header['HISTORY'][:6]) == indices[:6]
assert list(header['HISTORY'][:-2]) == indices[:-2]
assert list(header['HISTORY'][::-1]) == indices[::-1]
assert list(header['HISTORY'][1::-1]) == indices[1::-1]
assert list(header['HISTORY'][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['HISTORY'] = 'ABC'
header['FRED'] = 'BARNEY'
header['HISTORY'] = 'DEF'
header['HISTORY'] = 'GHI'
assert header['HISTORY'] == ['ABC', 'DEF', 'GHI']
# Single value update
header['HISTORY'][0] = 'FOO'
assert header['HISTORY'] == ['FOO', 'DEF', 'GHI']
# Single value partial slice update
header['HISTORY'][1:] = 'BAR'
assert header['HISTORY'] == ['FOO', 'BAR', 'BAR']
# Multi-value update
header['HISTORY'][:] = ['BAZ', 'QUX']
assert header['HISTORY'] == ['BAZ', 'QUX', 'BAR']
def test_commentary_comparison(self):
"""
Regression test for an issue found in *writing* the regression test for
        https://github.com/astropy/astropy/issues/2363, where the list of
        values for a commentary keyword did not always compare correctly with
        other iterables.
"""
header = fits.Header()
header['HISTORY'] = 'hello world'
header['HISTORY'] = 'hello world'
header['COMMENT'] = 'hello world'
assert header['HISTORY'] != header['COMMENT']
header['COMMENT'] = 'hello world'
assert header['HISTORY'] == header['COMMENT']
def test_long_commentary_card(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['BAZ'] = 'QUX'
longval = 'ABC' * 30
header['HISTORY'] = longval
header['FRED'] = 'BARNEY'
header['HISTORY'] = longval
assert len(header) == 7
assert list(header)[2] == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.set('HISTORY', longval, after='FOO')
assert len(header) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
header = fits.Header()
header.update({'FOO': 'BAR'})
header.update({'BAZ': 'QUX'})
longval = 'ABC' * 30
header.add_history(longval)
header.update({'FRED': 'BARNEY'})
header.add_history(longval)
assert len(header.cards) == 7
assert header.cards[2].keyword == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.add_history(longval, after='FOO')
assert len(header.cards) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
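        # A commentary value longer than the 72 characters available on a
        # single card is wrapped onto additional HISTORY cards, which is why
        # the 90-character value above occupies two cards in both variants.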
def test_totxtfile(self):
with fits.open(self.data('test0.fits')) as hdul:
hdul[0].header.totextfile(self.temp('header.txt'))
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')),
update=True, update_first=True)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp('test.fits'), output_verify='ignore')
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')),
update=True, update_first=True, strip=False)
assert 'MYKEY' in hdu.header
assert 'EXTENSION' not in hdu.header
assert 'SIMPLE' in hdu.header
hdu.writeto(self.temp('test.fits'), output_verify='ignore',
overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
assert len(hdul2) == 2
assert 'MYKEY' in hdul2[1].header
def test_fromfile(self):
"""Regression test for https://github.com/astropy/astropy/issues/8711
"""
filename = self.data('scale.fits')
hdr = fits.Header.fromfile(filename)
assert hdr['DATASET'] == '2MASS'
def test_header_fromtextfile(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
Manually write a text file containing some header cards ending with
newlines and ensure that fromtextfile can read them back in.
"""
header = fits.Header()
header['A'] = ('B', 'C')
header['B'] = ('C', 'D')
header['C'] = ('D', 'E')
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert header == header2
def test_header_fromtextfile_with_end_card(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Make sure that when a Header is read from a text file that the END card
is ignored.
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
# We don't use header.totextfile here because it writes each card with
# trailing spaces to pad them out to 80 characters. But this bug only
# presents itself when each card ends immediately with a newline, and
# no trailing spaces
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
f.write('\nEND')
new_header = fits.Header.fromtextfile(self.temp('test.hdr'))
assert 'END' not in new_header
assert header == new_header
def test_append_end_card(self):
"""
Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Manually adding an END card to a header should simply result in a
ValueError (as was the case in PyFITS 3.0 and earlier).
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
def setitem(k, v):
header[k] = v
pytest.raises(ValueError, setitem, 'END', '')
pytest.raises(ValueError, header.append, 'END')
pytest.raises(ValueError, header.append, 'END', end=True)
pytest.raises(ValueError, header.insert, len(header), 'END')
pytest.raises(ValueError, header.set, 'END')
def test_invalid_end_cards(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
This tests the case where the END card looks like a normal card like
'END = ' and other similar oddities. As long as a card starts with END
and looks like it was intended to be the END card we allow it, but with
a warning.
"""
horig = fits.PrimaryHDU(data=np.arange(100)).header
def invalid_header(end, pad):
# Build up a goofy invalid header
# Start from a seemingly normal header
s = horig.tostring(sep='', endcard=False, padding=False)
# append the bogus end card
s += end
# add additional padding if requested
if pad:
s += ' ' * _pad_length(len(s))
# This will differ between Python versions
if isinstance(s, bytes):
return BytesIO(s)
else:
return StringIO(s)
# Basic case motivated by the original issue; it's as if the END card
# was appended by software that doesn't know to treat it specially, and
# it is given an = after it
s = invalid_header('END =', True)
with pytest.warns(AstropyUserWarning, match="Unexpected bytes trailing "
"END keyword: ' ='") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# A case similar to the last but with more spaces between END and the
# =, as though the '= ' value indicator were placed like that of a
# normal card
s = invalid_header('END = ', True)
with pytest.warns(AstropyUserWarning, match="Unexpected bytes trailing "
"END keyword: ' ='") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# END card with trailing gibberish
s = invalid_header('END$%&%^*%*', True)
with pytest.warns(AstropyUserWarning, match=r"Unexpected bytes trailing "
r"END keyword: '\$%&%\^\*%\*'") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# 'END' at the very end of a truncated file without padding; the way
# the block reader works currently this can only happen if the 'END'
# is at the very end of the file.
s = invalid_header('END', False)
with pytest.warns(AstropyUserWarning, match="Missing padding to end of "
"the FITS block") as w:
# Don't raise an exception on missing padding, but still produce a
# warning that the END card is incomplete
h = fits.Header.fromfile(s, padding=False)
assert h == horig
assert len(w) == 1
def test_invalid_characters(self):
"""
Test header with invalid characters
"""
# Generate invalid file with non-ASCII character
h = fits.Header()
h['FOO'] = 'BAR'
h['COMMENT'] = 'hello'
hdul = fits.PrimaryHDU(header=h, data=np.arange(5))
hdul.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
out = f.read()
out = out.replace(b'hello', 'héllo'.encode('latin1'))
out = out.replace(b'BAR', 'BÀR'.encode('latin1'))
with open(self.temp('test2.fits'), 'wb') as f2:
f2.write(out)
with pytest.warns(AstropyUserWarning, match="non-ASCII characters are "
"present in the FITS file") as w:
h = fits.getheader(self.temp('test2.fits'))
assert h['FOO'] == 'B?R'
assert h['COMMENT'] == 'h?llo'
assert len(w) == 1
def test_unnecessary_move(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
Ensures that a header is not modified when setting the position of a
keyword that's already in its correct position.
"""
header = fits.Header([('A', 'B'), ('B', 'C'), ('C', 'D')])
header.set('B', before=2)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after=0)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before='C')
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after='A')
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before=2)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
# 123 is well past the end, and C is already at the end, so it's in the
# right place already
header.set('C', before=123)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('C', after=123)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
def test_invalid_float_cards(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
# Create a header containing two of the problematic cards in the test
# case where this came up:
hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
h = fits.Header.fromstring(hstr, sep='\n')
# First the case that *does* work prior to fixing this issue
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
# Now if this were reserialized, would new values for these cards be
# written with repaired exponent signs?
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert h.cards['FOCALLEN']._modified
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
assert h.cards['APERTURE']._modified
assert h._modified
# This is the case that was specifically causing problems; generating
# the card strings *before* parsing the values. Also, the card strings
# really should be "fixed" before being returned to the user
h = fits.Header.fromstring(hstr, sep='\n')
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert h.cards['FOCALLEN']._modified
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
assert h.cards['APERTURE']._modified
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
assert h._modified
# For the heck of it, try assigning the identical values and ensure
# that the newly fixed value strings are left intact
h['FOCALLEN'] = 155.0
h['APERTURE'] = 0.0
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
def test_invalid_float_cards2(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
"""
# The example for this test requires creating a FITS file containing a
# slightly misformatted float value. I can't actually even find a way
# to do that directly through Astropy--it won't let me.
hdu = fits.PrimaryHDU()
hdu.header['TEST'] = 5.0022221e-07
hdu.writeto(self.temp('test.fits'))
# Here we manually make the file invalid
with open(self.temp('test.fits'), 'rb+') as f:
f.seek(346) # Location of the exponent 'E' symbol
f.write(encode_ascii('e'))
with fits.open(self.temp('test.fits')) as hdul, \
pytest.warns(AstropyUserWarning) as w:
hdul.writeto(self.temp('temp.fits'), output_verify='warn')
assert len(w) == 5
# The first two warnings are just the headers to the actual warning
# message (HDU 0, Card 4). I'm still not sure things like that
# should be output as separate warning messages, but that's
# something to think about...
msg = str(w[3].message)
assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
float values like 0.001 the leading zero was unnecessarily being
stripped off when rewriting the header. Though leading zeros should be
removed from integer values to prevent misinterpretation as octal by
Python (for now Astropy will still maintain the leading zeros if no
changes are made to the value, but will drop them if changes are made).
"""
c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
assert str(c) == _pad("APERTURE= +0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
assert str(c) == _pad("APERTURE= 0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 017")
assert str(c) == _pad("APERTURE= 017")
assert c.value == 17
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad('FOO = T')
barimg = _pad('BAR = F')
h = fits.Header()
h['FOO'] = True
h['BAR'] = False
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h['FOO'] = np.bool_(True)
h['BAR'] = np.bool_(False)
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h.append(fits.Card.fromstring(fooimg))
h.append(fits.Card.fromstring(barimg))
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
def test_header_method_keyword_normalization(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
Basically ensures that all public Header methods are case-insensitive
w.r.t. keywords.
Provides a reasonably comprehensive test of several methods at once.
"""
h = fits.Header([('abC', 1), ('Def', 2), ('GeH', 3)])
assert list(h) == ['ABC', 'DEF', 'GEH']
assert 'abc' in h
assert 'dEf' in h
assert h['geh'] == 3
# Case insensitivity of wildcards
assert len(h['g*']) == 1
h['aBc'] = 2
assert h['abc'] == 2
# ABC already existed so assigning to aBc should not have added any new
# cards
assert len(h) == 3
del h['gEh']
assert list(h) == ['ABC', 'DEF']
assert len(h) == 2
assert h.get('def') == 2
h.set('Abc', 3)
assert h['ABC'] == 3
h.set('gEh', 3, before='Abc')
assert list(h) == ['GEH', 'ABC', 'DEF']
assert h.pop('abC') == 3
assert len(h) == 2
assert h.setdefault('def', 3) == 2
assert len(h) == 2
assert h.setdefault('aBc', 1) == 1
assert len(h) == 3
assert list(h) == ['GEH', 'DEF', 'ABC']
h.update({'GeH': 1, 'iJk': 4})
assert len(h) == 4
assert list(h) == ['GEH', 'DEF', 'ABC', 'IJK']
assert h['GEH'] == 1
assert h.count('ijk') == 1
assert h.index('ijk') == 3
h.remove('Def')
assert len(h) == 3
assert list(h) == ['GEH', 'ABC', 'IJK']
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=data)
hdu.header['TESTKW'] = ('Test val', 'This is the END')
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header is extended to two block sizes
while len(hdu.header) < 36:
hdu.header.append()
hdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp('test.hdr'))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert hdu.header == header2
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = '\u30a8\u30ea\u30c3\u30af'
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h['FOO'] = 'BAR'
assert 'FOO' in h
assert h['FOO'] == 'BAR'
assert repr(h) == _pad("FOO = 'BAR '")
pytest.raises(ValueError, assign, erikku, 'BAR')
h['FOO'] = 'BAZ'
assert h['FOO'] == 'BAZ'
assert repr(h) == _pad("FOO = 'BAZ '")
pytest.raises(ValueError, assign, 'FOO', erikku)
h['FOO'] = ('BAR', 'BAZ')
assert h['FOO'] == 'BAR'
assert h.comments['FOO'] == 'BAZ'
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
pytest.raises(ValueError, assign, 'FOO', ('BAR', erikku))
pytest.raises(ValueError, assign, 'FOO', (erikku, 'BAZ'))
pytest.raises(ValueError, assign, 'FOO', (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
While test_assign_unicode ensures that `str` objects containing
non-ASCII characters cannot be assigned to headers, it should not be
possible to assign bytes to a header at all.
"""
h = fits.Header()
with pytest.raises(ValueError, match="Illegal value: b'Hello'."):
h.set('TEST', b'Hello')
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
for its solution: optional stripping of whitespace from the end of a
header value.
By default extra whitespace is stripped off, but if
`fits.conf.strip_header_whitespace` = False it should not be
stripped.
"""
h = fits.Header()
h['FOO'] = 'Bar '
assert h['FOO'] == 'Bar'
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
with fits.conf.set_temp('strip_header_whitespace', False):
assert h['FOO'] == 'Bar '
assert h['QUX'] == 'Bar '
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
assert h['FOO'] == 'Bar'
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = ['CCD parameters table ...',
' reference table oref$n951041ko_ccd.fits',
' INFLIGHT 12/07/2001 25/02/2002',
' all bias frames'] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header['HISTORY'] = item
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header['HISTORY'] == hdu.header['HISTORY']
new_hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of BAD headers.
c1 = fits.Card.fromstring('CLFIND2D: contour = 0.30')
c2 = fits.Card.fromstring('Just some random text.')
c3 = fits.Card.fromstring('A' * 80)
hdu = fits.PrimaryHDU()
# This should work with some warnings
with pytest.warns(AstropyUserWarning) as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp('test.fits'))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp('test.fits')) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert 'CLFIND2D' in header
assert 'Just som' in header
assert 'AAAAAAAA' in header
assert header['CLFIND2D'] == ': contour = 0.30'
assert header['Just som'] == 'e random text.'
assert header['AAAAAAAA'] == 'A' * 72
# It should not be possible to assign to the invalid keywords
pytest.raises(ValueError, header.set, 'CLFIND2D', 'foo')
pytest.raises(ValueError, header.set, 'Just som', 'foo')
pytest.raises(ValueError, header.set, 'AAAAAAAA', 'foo')
def test_fix_hierarch_with_invalid_value(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring('HIERARCH ESO DET CHIP PXSPACE = 5e6')
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
c.verify('fix')
assert str(c) == _pad('HIERARCH ESO DET CHIP PXSPACE = 5E6')
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, 'TEST', float('nan'))
pytest.raises(ValueError, h.set, 'TEST', np.nan)
pytest.raises(ValueError, h.set, 'TEST', np.float32('nan'))
pytest.raises(ValueError, h.set, 'TEST', float('inf'))
pytest.raises(ValueError, h.set, 'TEST', np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header
cannot be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([('TEST', True)])
h['TEST'] = 1
assert h['TEST'] is not True
assert isinstance(h['TEST'], int)
assert h['TEST'] == 1
h['TEST'] = np.bool_(True)
assert h['TEST'] is True
h['TEST'] = False
assert h['TEST'] is False
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
h['TEST'] = 0
assert h['TEST'] is not False
assert isinstance(h['TEST'], int)
assert h['TEST'] == 0
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h['TEST'] = 1
# int -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 1.0')
# float -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 1')
# int -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (1.0, 0.0)')
# complex -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 1.0')
# float -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (1.0, 0.0)')
# complex -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 1')
# Now the same tests but with zeros
h['TEST'] = 0
# int -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 0.0')
# float -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 0')
# int -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (0.0, 0.0)')
# complex -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST = 0.0')
# float -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST = (0.0, 0.0)')
# complex -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST = 0')
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
pytest.raises(ValueError, h.set, 'HISTORY', '\n')
pytest.raises(ValueError, h.set, 'HISTORY', '\nabc')
pytest.raises(ValueError, h.set, 'HISTORY', 'abc\n')
pytest.raises(ValueError, h.set, 'HISTORY', 'abc\ndef')
test_cards = [
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 "
"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 "
"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' "
"HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv "
"HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1"
"HISTORY 1-04T16:59:14 "
]
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if '\n' in card_image:
pytest.raises(fits.VerifyError, c.verify, 'exception')
else:
c.verify('exception')
def test_long_commentary_card_appended_to_header(self):
"""
If a HISTORY or COMMENT card with a too-long value is appended to a
header with Header.append (as opposed to assigning to hdr['HISTORY']),
it fails verification.
Regression test for https://github.com/astropy/astropy/issues/11486
"""
header = fits.Header()
value = 'abc' * 90
# this is what Table does when saving its history metadata key to a
# FITS file
header.append(('history', value))
assert len(header.cards) == 1
# Test Card._split() directly since this was the main problem area
key, val = header.cards[0]._split()
assert key == 'HISTORY' and val == value
# Try adding this header to an HDU and writing it to a file
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp('test.fits'), overwrite=True)
def test_header_fromstring_bytes(self):
"""
Test reading a Header from a `bytes` string.
See https://github.com/astropy/astropy/issues/8706
"""
with open(self.data('test0.fits'), 'rb') as fobj:
pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())
pri_hdr = fits.getheader(self.data('test0.fits'))
assert pri_hdr['NAXIS'] == pri_hdr_from_bytes['NAXIS']
assert pri_hdr == pri_hdr_from_bytes
assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()
def test_set_keyword_with_space(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10479
"""
hdr = fits.Header()
hdr['KEY2 '] = 2
hdr['KEY2 '] = 4
assert len(hdr) == 1
assert hdr['KEY2'] == 4
assert hdr['KEY2 '] == 4
def test_strip(self):
hdr = fits.getheader(self.data('tb.fits'), ext=1)
hdr['FOO'] = 'bar'
hdr.strip()
assert set(hdr) == {'HISTORY', 'FOO'}
hdr = fits.getheader(self.data('tb.fits'), ext=1)
hdr['FOO'] = 'bar'
hdr = hdr.copy(strip=True)
assert set(hdr) == {'HISTORY', 'FOO'}
def test_update_invalid_card(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5408
Tests updating the value of a card that is malformatted (with an
invalid value literal).
This tests two ways of reproducing the problem, one working with a
Card object directly, and one when reading/writing a header containing
such an invalid card.
"""
card = fits.Card.fromstring('KW = INF / Comment')
card.value = 'FIXED'
assert tuple(card) == ('KW', 'FIXED', 'Comment')
card.verify('fix')
assert tuple(card) == ('KW', 'FIXED', 'Comment')
card = fits.Card.fromstring('KW = INF')
hdu = fits.PrimaryHDU()
# This is a loophole to write a header containing a malformatted card
card._verified = True
hdu.header.append(card)
hdu.header.tofile(self.temp('bogus.fits'))
with fits.open(self.temp('bogus.fits')) as hdul:
hdul[0].header['KW'] = -1
hdul.writeto(self.temp('bogus_fixed.fits'))
with fits.open(self.temp('bogus_fixed.fits')) as hdul:
assert hdul[0].header['KW'] == -1
def test_index_numpy_int(self):
header = fits.Header([('A', 'FOO'), ('B', 2), ('C', 'BAR')])
idx = np.int8(2)
assert header[idx] == 'BAR'
header[idx] = 'BAZ'
assert header[idx] == 'BAZ'
header.insert(idx, ('D', 42))
assert header[idx] == 42
header.add_comment('HELLO')
header.add_comment('WORLD')
assert header['COMMENT'][np.int64(1)] == 'WORLD'
header.append(('C', 'BAZBAZ'))
assert header[('C', np.int16(0))] == 'BAZ'
assert header[('C', np.uint32(1))] == 'BAZBAZ'
def test_header_data_size(self):
"""
Tests data size calculation (with and without padding) given a Header.
"""
hdu = fits.PrimaryHDU()
header = hdu.header
assert header.data_size == 0
header['BITPIX'] = 32
header['NAXIS'] = 2
header['NAXIS1'] = 100
header['NAXIS2'] = 100
assert header.data_size == 40000
assert header.data_size_padded == 40320
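# For reference, a minimal sketch of the arithmetic the assertions above rely
# on: data_size is abs(BITPIX) // 8 bytes per element times the product of the
# NAXISn values, and data_size_padded rounds that up to the next multiple of
# the 2880-byte FITS block. The helper below is hypothetical, not astropy API.
def _expected_data_sizes(bitpix=32, naxis1=100, naxis2=100):
    size = abs(bitpix) // 8 * naxis1 * naxis2  # 4 * 100 * 100 == 40000
    padded = -(-size // 2880) * 2880  # round up to a 2880-byte block == 40320
    return size, padded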
class TestRecordValuedKeywordCards(FitsTestCase):
"""
Tests for handling of record-valued keyword cards as used by the
`FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
These tests are derived primarily from the release notes for PyFITS 1.4 (in
which this feature was first introduced).
Note that extra leading spaces in the `value` fields should be parsed on input,
but will be stripped in the cards.
"""
def setup(self):
super().setup()
self._test_header = fits.Header()
self._test_header.set('DP1', 'NAXIS: 2')
self._test_header.set('DP1', 'AXIS.1: 1')
self._test_header.set('DP1', 'AXIS.2: 2')
self._test_header.set('DP1', 'NAUX: 2')
self._test_header.set('DP1', 'AUX.1.COEFF.0: 0')
self._test_header.set('DP1', 'AUX.1.POWER.0: 1')
self._test_header.set('DP1', 'AUX.1.COEFF.1: 0.00048828125')
self._test_header.set('DP1', 'AUX.1.POWER.1: 1')
def test_initialize_rvkc(self):
"""
Test different methods for initializing a card that should be
recognized as a RVKC
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
assert c.comment == 'A comment'
c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.1
assert c.field_specifier == 'NAXIS'
c = fits.Card.fromstring("DP1 = 'NAXIS: a'")
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1', 'NAXIS: 2')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: 2.0')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: a')
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1.NAXIS', 2)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
with pytest.warns(fits.verify.VerifyWarning):
c = fits.Card('DP1.NAXIS', 'a')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 'a'
assert c.field_specifier is None
def test_parse_field_specifier(self):
"""
Tests that the field_specifier can be accessed from a card read from a
string before any other attributes are accessed.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.comment == 'A comment'
def test_update_field_specifier(self):
"""
Test setting the field_specifier attribute and updating the card image
to reflect the new value.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
c.field_specifier = 'NAXIS1'
assert c.field_specifier == 'NAXIS1'
assert c.keyword == 'DP1.NAXIS1'
assert c.value == 2.0
assert c.comment == 'A comment'
assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"
def test_field_specifier_case_senstivity(self):
"""
The keyword portion of an RVKC should still be case-insensitive, but
the field-specifier portion should be case-sensitive.
"""
header = fits.Header()
header.set('abc.def', 1)
header.set('abc.DEF', 2)
assert header['abc.def'] == 1
assert header['ABC.def'] == 1
assert header['aBc.def'] == 1
assert header['ABC.DEF'] == 2
assert 'ABC.dEf' not in header
def test_get_rvkc_by_index(self):
"""
Returning a RVKC from a header via index lookup should return the
float value of the card.
"""
assert self._test_header[0] == 2.0
assert isinstance(self._test_header[0], float)
assert self._test_header[1] == 1.0
assert isinstance(self._test_header[1], float)
def test_get_rvkc_by_keyword(self):
"""
Returning a RVKC just via the keyword name should return the full value
string of the first card with that keyword.
This test was changed to reflect the requirement in ticket
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it required
_test_header['DP1'] to return the parsed float value.
"""
assert self._test_header['DP1'] == 'NAXIS: 2'
def test_get_rvkc_by_keyword_and_field_specifier(self):
"""
Returning a RVKC via the full keyword/field-specifier combination
should return the floating point value associated with the RVKC.
"""
assert self._test_header['DP1.NAXIS'] == 2.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
assert self._test_header['DP1.AUX.1.COEFF.1'] == 0.00048828125
def test_access_nonexistent_rvkc(self):
"""
Accessing a nonexistent RVKC should raise an IndexError for
index-based lookup, or a KeyError for keyword lookup (like a normal
card).
"""
pytest.raises(IndexError, lambda x: self._test_header[x], 8)
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'DP1\.AXIS\.3' not found."):
self._test_header['DP1.AXIS.3']
def test_update_rvkc(self):
"""A RVKC can be updated either via index or keyword access."""
self._test_header[0] = 3
assert self._test_header['DP1.NAXIS'] == 3.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
self._test_header['DP1.AXIS.1'] = 1.1
assert self._test_header['DP1.AXIS.1'] == 1.1
def test_update_rvkc_2(self):
"""Regression test for an issue that appeared after SVN r2412."""
h = fits.Header()
h['D2IM1.EXTVER'] = 1
assert h['D2IM1.EXTVER'] == 1.0
h['D2IM1.EXTVER'] = 2
assert h['D2IM1.EXTVER'] == 2.0
def test_raw_keyword_value(self):
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2'
c = fits.Card('DP1.NAXIS', 2)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
def test_rvkc_insert_after(self):
"""
It should be possible to insert a new RVKC after an existing one
specified by the full keyword/field-specifier combination."""
self._test_header.set('DP1', 'AXIS.3: 1', 'a comment',
after='DP1.AXIS.2')
assert self._test_header[3] == 1
assert self._test_header['DP1.AXIS.3'] == 1
def test_rvkc_delete(self):
"""
Deleting a RVKC should work as with a normal card by using the full
keyword/field-specifier combination.
"""
del self._test_header['DP1.AXIS.1']
assert len(self._test_header) == 7
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.AXIS.2'
# Perform a subsequent delete to make sure all the index mappings were
# updated
del self._test_header['DP1.AXIS.2']
assert len(self._test_header) == 6
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_pattern_matching_keys(self):
"""Test the keyword filter strings with RVKCs."""
cl = self._test_header['DP1.AXIS.*']
assert isinstance(cl, fits.Header)
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
cl = self._test_header['DP1.N*']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'",
"DP1 = 'NAUX: 2'"])
cl = self._test_header['DP1.AUX...']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl = self._test_header['DP?.NAXIS']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'"])
cl = self._test_header['DP1.A*S.*']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
def test_pattern_matching_key_deletion(self):
"""Deletion by filter strings should work."""
del self._test_header['DP1.A*...']
assert len(self._test_header) == 2
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_successive_pattern_matching(self):
"""
A card list returned via a filter string should be further filterable.
"""
cl = self._test_header['DP1.A*...']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl2 = cl['*.*AUX...']
assert ([str(c).strip() for c in cl2.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
def test_rvkc_in_cardlist_keys(self):
"""
The CardList.keys() method should return full keyword/field-spec values
for RVKCs.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl) == ['DP1.AXIS.1', 'DP1.AXIS.2']
def test_rvkc_in_cardlist_values(self):
"""
The CardList.values() method should return the values of all RVKCs as
floating point values.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl.values()) == [1.0, 2.0]
def test_rvkc_value_attribute(self):
"""
Individual card values should be accessible by the .value attribute
(which should return a float).
"""
cl = self._test_header['DP1.AXIS.*']
assert cl.cards[0].value == 1.0
assert isinstance(cl.cards[0].value, float)
def test_overly_permissive_parsing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183
Ensures that cards with standard commentary keywords are never treated
as RVKCs. Also ensures that cards not strictly matching the RVKC
pattern are not treated as such.
"""
h = fits.Header()
h['HISTORY'] = 'AXIS.1: 2'
h['HISTORY'] = 'AXIS.2: 2'
assert 'HISTORY.AXIS' not in h
assert 'HISTORY.AXIS.1' not in h
assert 'HISTORY.AXIS.2' not in h
assert h['HISTORY'] == ['AXIS.1: 2', 'AXIS.2: 2']
# This is an example straight out of the ticket where everything after
# the '2012' in the date value was being ignored, allowing the value to
# successfully be parsed as a "float"
h = fits.Header()
h['HISTORY'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'HISTORY.Date' not in h
assert str(h.cards[0]) == _pad('HISTORY Date: 2012-09-19T13:58:53.756061')
c = fits.Card.fromstring(
" 'Date: 2012-09-19T13:58:53.756061'")
assert c.keyword == ''
assert c.value == "'Date: 2012-09-19T13:58:53.756061'"
assert c.field_specifier is None
h = fits.Header()
h['FOO'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'FOO.Date' not in h
assert (str(h.cards[0]) ==
_pad("FOO = 'Date: 2012-09-19T13:58:53.756061'"))
def test_overly_aggressive_rvkc_lookup(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184
Ensures that looking up a RVKC by keyword only (without the
field-specifier) in a header returns the full string value of that card
without parsing it as a RVKC. Also ensures that a full field-specifier
is required to match a RVKC--a partial field-specifier that doesn't
explicitly match any record-valued keyword should result in a KeyError.
"""
c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'")
c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'")
h = fits.Header([c1, c2])
assert h['FOO'] == 'AXIS.1: 2'
assert h[('FOO', 1)] == 'AXIS.2: 4'
assert h['FOO.AXIS.1'] == 2.0
assert h['FOO.AXIS.2'] == 4.0
assert 'FOO.AXIS' not in h
assert 'FOO.AXIS.' not in h
assert 'FOO.' not in h
pytest.raises(KeyError, lambda: h['FOO.AXIS'])
pytest.raises(KeyError, lambda: h['FOO.AXIS.'])
pytest.raises(KeyError, lambda: h['FOO.'])
def test_fitsheader_script(self):
"""Tests the basic functionality of the `fitsheader` script."""
from astropy.io.fits.scripts import fitsheader
# Can an extension be specified by the EXTNAME keyword?
hf = fitsheader.HeaderFormatter(self.data('zerowidth.fits'))
output = hf.parse(extensions=['AIPS FQ'])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX" in output
# Can we limit the display to one specific keyword?
output = hf.parse(extensions=['AIPS FQ'], keywords=['EXTNAME'])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX =" not in output
assert len(output.split('\n')) == 3
# Can we limit the display to two specific keywords?
output = hf.parse(extensions=[1],
keywords=['EXTNAME', 'BITPIX'])
assert "EXTNAME =" in output
assert "BITPIX =" in output
assert len(output.split('\n')) == 4
# Can we use wildcards for keywords?
output = hf.parse(extensions=[1], keywords=['NAXIS*'])
assert "NAXIS =" in output
assert "NAXIS1 =" in output
assert "NAXIS2 =" in output
hf.close()
# Can an extension be specified by the EXTNAME+EXTVER keywords?
hf = fitsheader.HeaderFormatter(self.data('test0.fits'))
assert "EXTNAME = 'SCI" in hf.parse(extensions=['SCI,2'])
hf.close()
# Can we print the original header before decompression?
hf = fitsheader.HeaderFormatter(self.data('comp.fits'))
assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1],
compressed=False)
assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1],
compressed=True)
hf.close()
def test_fitsheader_table_feature(self):
"""Tests the `--table` feature of the `fitsheader` script."""
from astropy.io import fits
from astropy.io.fits.scripts import fitsheader
test_filename = self.data('zerowidth.fits')
formatter = fitsheader.TableHeaderFormatter(test_filename)
with fits.open(test_filename) as fitsobj:
# Does the table contain the expected number of rows?
mytable = formatter.parse([0])
assert len(mytable) == len(fitsobj[0].header)
# Repeat the above test when multiple HDUs are requested
mytable = formatter.parse(extensions=['AIPS FQ', 2, "4"])
assert len(mytable) == (len(fitsobj['AIPS FQ'].header)
+ len(fitsobj[2].header)
+ len(fitsobj[4].header))
# Can we recover the filename and extension name from the table?
mytable = formatter.parse(extensions=['AIPS FQ'])
assert np.all(mytable['filename'] == test_filename)
assert np.all(mytable['hdu'] == 'AIPS FQ')
assert mytable['value'][mytable['keyword'] == "EXTNAME"] == "AIPS FQ"
# Can we specify a single extension/keyword?
mytable = formatter.parse(extensions=['AIPS FQ'],
keywords=['EXTNAME'])
assert len(mytable) == 1
assert mytable['hdu'][0] == "AIPS FQ"
assert mytable['keyword'][0] == "EXTNAME"
assert mytable['value'][0] == "AIPS FQ"
# Is an incorrect extension dealt with gracefully?
mytable = formatter.parse(extensions=['DOES_NOT_EXIST'])
assert mytable is None
# Is an incorrect keyword dealt with gracefully?
mytable = formatter.parse(extensions=['AIPS FQ'],
keywords=['DOES_NOT_EXIST'])
assert mytable is None
formatter.close()
@pytest.mark.parametrize('mode', ['wb', 'wb+', 'ab', 'ab+'])
def test_hdu_writeto_mode(self, mode):
with open(self.temp('mode.fits'), mode=mode) as ff:
hdu = fits.ImageHDU(data=np.ones(5))
hdu.writeto(ff)
def test_subclass():
"""Check that subclasses don't get ignored on slicing and copying."""
class MyHeader(fits.Header):
def append(self, card, *args, **kwargs):
if isinstance(card, tuple) and len(card) == 2:
# Just for our checks we add a comment if there is none.
card += ('no comment',)
return super().append(card, *args, **kwargs)
my_header = MyHeader((('a', 1., 'first'),
('b', 2., 'second'),
('c', 3.,)))
assert my_header.comments['a'] == 'first'
assert my_header.comments['b'] == 'second'
assert my_header.comments['c'] == 'no comment'
slice_ = my_header[1:]
assert type(slice_) is MyHeader
assert slice_.comments['b'] == 'second'
assert slice_.comments['c'] == 'no comment'
selection = my_header['c*']
assert type(selection) is MyHeader
assert selection.comments['c'] == 'no comment'
copy_ = my_header.copy()
assert type(copy_) is MyHeader
assert copy_.comments['b'] == 'second'
assert copy_.comments['c'] == 'no comment'
my_header.extend((('d', 4.),))
assert my_header.comments['d'] == 'no comment'
|
afdea4237857b60b77c8229f38536f38fb7e61a5459fe9043215597bbb9ddcda | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
import os
from . import FitsTestCase
from astropy.io.fits.convenience import writeto
from astropy.io.fits.hdu import PrimaryHDU, hdulist
from astropy.io.fits import Header, ImageHDU, HDUList, FITSDiff
from astropy.io.fits.scripts import fitsdiff
from astropy import __version__ as version
class TestFITSDiff_script(FitsTestCase):
def test_help(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main(['-h'])
assert e.value.code == 0
def test_version(self, capsys):
with pytest.raises(SystemExit) as e:
fitsdiff.main(['--version'])
out = capsys.readouterr()[0]
assert out == f'fitsdiff {version}'
assert e.value.code == 0
def test_noargs(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main([""])
assert e.value.code == 2
def test_oneargargs(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main(["file1"])
assert e.value.code == 2
def test_nodiff(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 0
def test_onediff(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 12
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 1
def test_manydiff(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a + 1
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
out, err = capsys.readouterr()
assert numdiff == 1
assert out.splitlines()[-4:] == [
' a> 9',
' b> 10',
' ...',
' 100 different pixels found (100.00% different).']
numdiff = fitsdiff.main(['-n', '1', tmp_a, tmp_b])
out, err = capsys.readouterr()
assert numdiff == 1
assert out.splitlines()[-4:] == [
' a> 0',
' b> 1',
' ...',
' 100 different pixels found (100.00% different).']
def test_outputfile(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 12
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(['-o', self.temp('diff.txt'), tmp_a, tmp_b])
assert numdiff == 1
with open(self.temp('diff.txt')) as f:
out = f.read()
assert out.splitlines()[-4:] == [
' Data differs at [1, 2]:',
' a> 10',
' b> 12',
' 1 different pixels found (1.00% different).']
def test_atol(self):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-a", "1", tmp_a, tmp_b])
assert numdiff == 0
numdiff = fitsdiff.main(["--exact", "-a", "1", tmp_a, tmp_b])
assert numdiff == 1
def test_rtol(self):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-r", "1e-1", tmp_a, tmp_b])
assert numdiff == 0
def test_rtol_diff(self, capsys):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-r", "1e-2", tmp_a, tmp_b])
assert numdiff == 1
out, err = capsys.readouterr()
assert out == f"""
fitsdiff: {version}
a: {tmp_a}
b: {tmp_b}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.01, Absolute tolerance: 0.0
Primary HDU:
Data contains differences:
Data differs at [1, 2]:
a> 10.0
? ^
b> 11.0
? ^
1 different pixels found (1.00% different).
"""
assert err == ""
def test_wildcard(self):
tmp1 = self.temp("tmp_file1")
with pytest.raises(SystemExit) as e:
fitsdiff.main([tmp1+"*", "ACME"])
assert e.value.code == 2
def test_not_quiet(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 0
out, err = capsys.readouterr()
assert out == f"""
fitsdiff: {version}
a: {tmp_a}
b: {tmp_b}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.0, Absolute tolerance: 0.0
No differences found.
"""
assert err == ""
def test_quiet(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-q", tmp_a, tmp_b])
assert numdiff == 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
@pytest.mark.slow
def test_path(self, capsys):
os.mkdir(self.temp('sub/'))
tmp_b = self.temp('sub/ascii.fits')
tmp_g = self.temp('sub/group.fits')
tmp_h = self.data('group.fits')
with hdulist.fitsopen(tmp_h) as hdu_b:
hdu_b.writeto(tmp_g)
writeto(tmp_b, np.arange(100).reshape(10, 10))
# one modified file and a directory
assert fitsdiff.main(["-q", self.data_dir, tmp_b]) == 1
assert fitsdiff.main(["-q", tmp_b, self.data_dir]) == 1
# two directories
tmp_d = self.temp('sub/')
assert fitsdiff.main(["-q", self.data_dir, tmp_d]) == 1
assert fitsdiff.main(["-q", tmp_d, self.data_dir]) == 1
with pytest.warns(UserWarning, match=r"Field 'ORBPARM' has a repeat "
r"count of 0 in its format code"):
assert fitsdiff.main(["-q", self.data_dir, self.data_dir]) == 0
# no match
tmp_c = self.data('arange.fits')
fitsdiff.main([tmp_c, tmp_d])
out, err = capsys.readouterr()
assert "'arange.fits' has no match in" in err
# globbing
with pytest.warns(UserWarning, match=r"Field 'ORBPARM' has a repeat "
r"count of 0 in its format code"):
assert fitsdiff.main(["-q", self.data_dir+'/*.fits',
self.data_dir]) == 0
assert fitsdiff.main(["-q", self.data_dir+'/g*.fits', tmp_d]) == 0
# one file and a directory
tmp_f = self.data('tb.fits')
assert fitsdiff.main(["-q", tmp_f, self.data_dir]) == 0
assert fitsdiff.main(["-q", self.data_dir, tmp_f]) == 0
def test_ignore_hdus(self):
a = np.arange(100).reshape(10, 10)
b = a.copy() + 1
ha = Header([('A', 1), ('B', 2), ('C', 3)])
phdu_a = PrimaryHDU(header=ha)
phdu_b = PrimaryHDU(header=ha)
ihdu_a = ImageHDU(data=a, name='SCI')
ihdu_b = ImageHDU(data=b, name='SCI')
hdulist_a = HDUList([phdu_a, ihdu_a])
hdulist_b = HDUList([phdu_b, ihdu_b])
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdulist_a.writeto(tmp_a)
hdulist_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 1
numdiff = fitsdiff.main([tmp_a, tmp_b, "-u", "SCI"])
assert numdiff == 0
def test_ignore_hdus_report(self, capsys):
a = np.arange(100).reshape(10, 10)
b = a.copy() + 1
ha = Header([('A', 1), ('B', 2), ('C', 3)])
phdu_a = PrimaryHDU(header=ha)
phdu_b = PrimaryHDU(header=ha)
ihdu_a = ImageHDU(data=a, name='SCI')
ihdu_b = ImageHDU(data=b, name='SCI')
hdulist_a = HDUList([phdu_a, ihdu_a])
hdulist_b = HDUList([phdu_b, ihdu_b])
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdulist_a.writeto(tmp_a)
hdulist_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b, "-u", "SCI"])
assert numdiff == 0
out, err = capsys.readouterr()
assert "testa.fits" in out
assert "testb.fits" in out
@pytest.mark.skip(reason="fails intentionally to show open files (see PR #10159)")
def test_fitsdiff_openfile(tmpdir):
"""Make sure that failing FITSDiff doesn't leave open files."""
path1 = str(tmpdir.join("file1.fits"))
path2 = str(tmpdir.join("file2.fits"))
hdulist = HDUList([PrimaryHDU(), ImageHDU(data=np.zeros(5))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = FITSDiff(path1, path2)
assert diff.identical, diff.report()
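# For reference, fitsdiff.main() is driven here with the same argument list a
# shell invocation would supply, e.g. a hypothetical session
#
#     fitsdiff -q -r 1e-2 testa.fits testb.fits
#
# It returns 0 when no differences are found and 1 when the files differ,
# while usage errors (too few arguments) raise SystemExit with code 2.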
|
21054c4d1a1ee36f96b354ba85412d972042a48b1e5c5b4b4ddef48cfcac233a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from . import FitsTestCase
from astropy.io.fits.scripts import fitsheader
from astropy import __version__ as version
class TestFITSheader_script(FitsTestCase):
def test_help(self):
with pytest.raises(SystemExit) as e:
fitsheader.main(['-h'])
assert e.value.code == 0
def test_version(self, capsys):
with pytest.raises(SystemExit) as e:
fitsheader.main(['--version'])
out = capsys.readouterr()[0]
assert out == f'fitsheader {version}'
assert e.value.code == 0
def test_file_exists(self, capsys):
fitsheader.main([self.data('arange.fits')])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith(
'SIMPLE = T / conforms to FITS standard')
assert err == ''
def test_by_keyword(self, capsys):
fitsheader.main(['-k', 'NAXIS', self.data('arange.fits')])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith(
'NAXIS = 3 / number of array dimensions')
fitsheader.main(['-k', 'NAXIS*', self.data('arange.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 5
assert out[1].startswith('NAXIS')
assert out[2].startswith('NAXIS1')
assert out[3].startswith('NAXIS2')
assert out[4].startswith('NAXIS3')
fitsheader.main(['-k', 'RANDOMKEY', self.data('arange.fits')])
out, err = capsys.readouterr()
assert err.startswith('WARNING') and 'RANDOMKEY' in err
assert not err.startswith('ERROR')
def test_by_extension(self, capsys):
fitsheader.main(['-e', '1', self.data('test0.fits')])
out, err = capsys.readouterr()
assert len(out.splitlines()) == 62
fitsheader.main(['-e', '3', '-k', 'BACKGRND', self.data('test0.fits')])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith('BACKGRND= 312.')
fitsheader.main(['-e', '0', '-k', 'BACKGRND', self.data('test0.fits')])
out, err = capsys.readouterr()
assert err.startswith('WARNING')
fitsheader.main(['-e', '3', '-k', 'FOO', self.data('test0.fits')])
out, err = capsys.readouterr()
assert err.startswith('WARNING')
def test_table(self, capsys):
fitsheader.main(['-t', '-k', 'BACKGRND', self.data('test0.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 5
assert out[1].endswith('| 1 | BACKGRND | 316.0 |')
assert out[2].endswith('| 2 | BACKGRND | 351.0 |')
assert out[3].endswith('| 3 | BACKGRND | 312.0 |')
assert out[4].endswith('| 4 | BACKGRND | 323.0 |')
fitsheader.main(['-t', '-e', '0', '-k', 'NAXIS',
self.data('arange.fits'),
self.data('ascii.fits'),
self.data('blank.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[1].endswith('| 0 | NAXIS | 3 |')
assert out[2].endswith('| 0 | NAXIS | 0 |')
assert out[3].endswith('| 0 | NAXIS | 2 |')
def test_fitsort(self, capsys):
fitsheader.main(['-e', '0', '-f', '-k', 'EXPSTART', '-k', 'EXPTIME',
self.data('test0.fits'), self.data('test1.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[2].endswith('test0.fits 49491.65366175 0.23')
assert out[3].endswith('test1.fits 49492.65366175 0.22')
fitsheader.main(['-e', '0', '-f', '-k', 'EXPSTART', '-k', 'EXPTIME',
self.data('test0.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 3
assert out[2].endswith('test0.fits 49491.65366175 0.23')
fitsheader.main(['-f', '-k', 'NAXIS',
self.data('tdim.fits'), self.data('test1.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[0].endswith('0:NAXIS 1:NAXIS 2:NAXIS 3:NAXIS 4:NAXIS')
assert out[2].endswith('tdim.fits 0 2 -- -- --')
assert out[3].endswith('test1.fits 0 2 2 2 2')
# check that files missing the requested keyword are still listed
fitsheader.main(['-f', '-k', 'DATE-OBS',
self.data('table.fits'), self.data('test0.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[2].endswith('table.fits --')
assert out[3].endswith('test0.fits 19/05/94')
# check that COMMENT and HISTORY are excluded
fitsheader.main(['-e', '0', '-f', self.data('tb.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 3
assert out[2].endswith('tb.fits True 16 0 True '
'STScI-STSDAS/TABLES tb.fits 1')
def test_fitsort_sorting_keyword_fitsort(self, capsys):
"""check that sorting by keyword works"""
fitsheader.main(['-f', '-k', 'NAXIS', '-e', '0',
self.data('group.fits'), self.data('test0.fits')])
out_unsorted, err_unsorted = capsys.readouterr()
out_unsorted = out_unsorted.splitlines()
fitsheader.main(['-f', '-s', 'NAXIS', '-k', 'NAXIS', '-e', '0',
self.data('group.fits'), self.data('test0.fits')])
out_sorted, err_sorted = capsys.readouterr()
out_sorted = out_sorted.splitlines()
assert len(out_unsorted) == 4
assert out_unsorted[2].endswith('group.fits 5')
assert out_unsorted[3].endswith('test0.fits 0')
assert len(out_sorted) == 4
assert out_sorted[2].endswith('test0.fits 0')
assert out_sorted[3].endswith('group.fits 5')
def test_fitsort_sorting_keyword_complains(self, capsys):
with pytest.raises(SystemExit):
fitsheader.main(['-t', '-s', 'DUMMY',
self.data('group.fits'), self.data('test0.fits')])
out_table, err_table = capsys.readouterr()
assert 'only supported in conjunction with -f/--fitsort' in err_table
with pytest.raises(SystemExit):
fitsheader.main(['-s', 'DUMMY',
self.data('group.fits'), self.data('test0.fits')])
out_default, err_default = capsys.readouterr()
assert 'only supported in conjunction with -f/--fitsort' in err_default
def test_dotkeyword(self, capsys):
fitsheader.main(['-e', '0', '-k', 'ESO DET ID',
self.data('fixed-1890.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 2
assert out[1].strip().endswith("HIERARCH ESO DET ID = 'DV13' / Detector system Id")
fitsheader.main(['-e', '0', '-k', 'ESO.DET.ID',
self.data('fixed-1890.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 2
assert out[1].strip().endswith("HIERARCH ESO DET ID = 'DV13' / Detector system Id")
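# For reference, the calls above correspond to invocations of the installed
# fitsheader script, e.g. a hypothetical session
#
#     fitsheader -e 0 -k 'ESO DET ID' fixed-1890.fits
#     fitsheader -f -s NAXIS -k NAXIS -e 0 group.fits test0.fits
#
# where -e selects an extension, -k a keyword, -t prints a table, -f enables
# fitsort-style output, and -s (only valid together with -f) sorts that output
# by a keyword.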
|
f6a16fb425ae4ff9a489d1983a9d574fb8f1a219c9c6da8f4d5ea8b6d396a5d5 | import gc
import pathlib
import warnings
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.io.fits.column import (_parse_tdisp_format, _fortran_to_python_format,
python_to_tdisp)
from astropy.io.fits import HDUList, PrimaryHDU, BinTableHDU, ImageHDU, table_to_hdu
from astropy.io import fits
from astropy import units as u
from astropy.table import Table, QTable, Column
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units.format.fits import UnitScaleError
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import (AstropyUserWarning,
AstropyDeprecationWarning)
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy.time import Time
from astropy.units.quantity import QuantityInfo
from astropy.io.tests.mixin_columns import mixin_cols, compare_attrs, serialized_names
# FITS does not preserve precision, in_subfmt, and out_subfmt.
time_attrs = ['value', 'shape', 'format', 'scale', 'location']
compare_attrs = {name: (time_attrs if isinstance(col, Time) else compare_attrs[name])
for name, col in mixin_cols.items()}
# FITS does not support multi-element location, array with object dtype,
# or logarithmic quantities.
unsupported_cols = {name: col for name, col in mixin_cols.items()
if (isinstance(col, Time) and col.location.shape != ()
or isinstance(col, np.ndarray) and col.dtype.kind == 'O'
or isinstance(col, u.LogQuantity))}
mixin_cols = {name: col for name, col in mixin_cols.items()
if name not in unsupported_cols}
def equal_data(a, b):
for name in a.dtype.names:
if not np.all(a[name] == b[name]):
return False
return True
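# equal_data compares field by field, presumably so that a Table and a plain
# structured ndarray (or two tables whose column dtypes differ slightly after
# a FITS round-trip) still compare equal when their values match.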
class TestSingleTable:
def setup_class(self):
self.data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'U1'), ('c', float)])
def test_simple(self, tmpdir):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_pathlib(self, tmpdir):
filename = pathlib.Path(str(tmpdir.join('test_simple.fit')))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_meta(self, tmpdir):
filename = str(tmpdir.join('test_simple.fits'))
t1 = Table(self.data)
t1.meta['A'] = 1
t1.meta['B'] = 2.3
t1.meta['C'] = 'spam'
t1.meta['comments'] = ['this', 'is', 'a', 'long', 'comment']
t1.meta['HISTORY'] = ['first', 'second', 'third']
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
for key in t1.meta:
if isinstance(t1.meta, list):
for i in range(len(t1.meta[key])):
assert t1.meta[key][i] == t2.meta[key][i]
else:
assert t1.meta[key] == t2.meta[key]
def test_simple_meta_conflicting(self, tmpdir):
filename = str(tmpdir.join('test_simple.fits'))
t1 = Table(self.data)
t1.meta['ttype1'] = 'spam'
with pytest.warns(AstropyUserWarning, match='Meta-data keyword ttype1 '
'will be ignored since it conflicts with a FITS '
'reserved keyword') as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
def test_simple_noextension(self, tmpdir):
"""
Test that file type is recognized without extension
"""
filename = str(tmpdir.join('test_simple'))
t1 = Table(self.data)
t1.write(filename, overwrite=True, format='fits')
t2 = Table.read(filename)
assert equal_data(t1, t2)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_with_units(self, table_type, tmpdir):
filename = str(tmpdir.join('test_with_units.fits'))
t1 = table_type(self.data)
t1['a'].unit = u.m
t1['c'].unit = u.km / u.s
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2['a'].unit == u.m
assert t2['c'].unit == u.km / u.s
def test_with_custom_units_qtable(self, tmpdir):
# Test only for QTable - for Table's Column, new units are dropped
# (as is checked in test_write_drop_nonstandard_units).
filename = str(tmpdir.join('test_with_units.fits'))
unit = u.def_unit('bandpass_sol_lum')
t = QTable()
t['l'] = np.ones(5) * unit
with pytest.warns(AstropyUserWarning) as w:
t.write(filename, overwrite=True)
assert len(w) == 1
assert 'bandpass_sol_lum' in str(w[0].message)
# Just reading back, the data is fine but the unit is not recognized.
with pytest.warns(u.UnitsWarning, match="'bandpass_sol_lum' did not parse") as w:
t2 = QTable.read(filename)
assert len(w) == 1
assert isinstance(t2['l'].unit, u.UnrecognizedUnit)
assert str(t2['l'].unit) == 'bandpass_sol_lum'
assert np.all(t2['l'].value == t['l'].value)
# But if we enable the unit, it should be recognized.
with u.add_enabled_units(unit):
t3 = QTable.read(filename)
assert t3['l'].unit is unit
assert equal_data(t3, t)
# Regression check for #8897; write used to fail when a custom
# unit was enabled.
with pytest.warns(AstropyUserWarning):
t3.write(filename, overwrite=True)
# It should also be possible to read the file in using a unit alias,
# even to a unit that may not be the same.
with u.set_enabled_aliases({'bandpass_sol_lum': u.Lsun}):
t3 = QTable.read(filename)
assert t3['l'].unit is u.Lsun
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_read_with_unit_aliases(self, table_type):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = 'Angstroms'
hdu.columns[2].unit = 'ergs/(cm.s.Angstroms)'
with u.set_enabled_aliases(dict(Angstroms=u.AA, ergs=u.erg)):
t = table_type.read(hdu)
assert t['a'].unit == u.AA
assert t['c'].unit == u.erg/(u.cm*u.s*u.AA)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_with_format(self, table_type, tmpdir):
filename = str(tmpdir.join('test_with_format.fits'))
t1 = table_type(self.data)
t1['a'].format = '{:5d}'
t1['b'].format = '{:>20}'
t1['c'].format = '{:6.2f}'
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2['a'].format == '{:5d}'
assert t2['b'].format == '{:>20}'
assert t2['c'].format == '{:6.2f}'
def test_masked(self, tmpdir):
filename = str(tmpdir.join('test_masked.fits'))
t1 = Table(self.data, masked=True)
t1.mask['a'] = [1, 0, 1, 0]
t1.mask['b'] = [1, 0, 0, 1]
t1.mask['c'] = [0, 1, 1, 0]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
assert np.all(t1['a'].mask == t2['a'].mask)
assert np.all(t1['b'].mask == t2['b'].mask)
assert np.all(t1['c'].mask == t2['c'].mask)
@pytest.mark.parametrize('masked', [True, False])
def test_masked_nan(self, masked, tmpdir):
"""Check that masked values by default are replaced by NaN.
This should work for any shape and be independent of whether the
Table is formally masked or not.
"""
filename = str(tmpdir.join('test_masked_nan.fits'))
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1], dtype='f4')
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=['a', 'b', 'c'], masked=masked)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2['a'].data, [np.nan, 8.5, np.nan, 6.25])
assert_array_equal(t2['b'].data, [np.nan, 4.5, 6.75, np.nan])
assert_array_equal(t2['c'].data, np.stack([t2['a'].data, t2['b'].data],
axis=-1))
assert np.all(t1['a'].mask == t2['a'].mask)
assert np.all(t1['b'].mask == t2['b'].mask)
assert np.all(t1['c'].mask == t2['c'].mask)
def test_masked_serialize_data_mask(self, tmpdir):
filename = str(tmpdir.join('test_masked_nan.fits'))
a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])
b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1])
c = np.ma.stack([a, b], axis=-1)
t1 = Table([a, b, c], names=['a', 'b', 'c'])
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert_array_equal(t2['a'].data, [5.25, 8.5, 3.75, 6.25])
assert_array_equal(t2['b'].data, [2.5, 4.5, 6.75, 8.875])
assert_array_equal(t2['c'].data, np.stack([t2['a'].data, t2['b'].data],
axis=-1))
assert np.all(t1['a'].mask == t2['a'].mask)
assert np.all(t1['b'].mask == t2['b'].mask)
assert np.all(t1['c'].mask == t2['c'].mask)
def test_read_from_fileobj(self, tmpdir):
filename = str(tmpdir.join('test_read_from_fileobj.fits'))
hdu = BinTableHDU(self.data)
hdu.writeto(filename, overwrite=True)
with open(filename, 'rb') as f:
t = Table.read(f)
assert equal_data(t, self.data)
def test_read_with_nonstandard_units(self):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = 'RADIANS'
hdu.columns[1].unit = 'spam'
hdu.columns[2].unit = 'millieggs'
with pytest.warns(u.UnitsWarning, match="did not parse as fits unit"):
t = Table.read(hdu)
assert equal_data(t, self.data)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_write_drop_nonstandard_units(self, table_type, tmpdir):
# While we are generous on input (see above), we are strict on
        # output, dropping units not recognized by the FITS standard.
filename = str(tmpdir.join('test_nonstandard_units.fits'))
spam = u.def_unit('spam')
t = table_type()
t['a'] = [1., 2., 3.] * spam
with pytest.warns(AstropyUserWarning, match='spam') as w:
t.write(filename)
assert len(w) == 1
if table_type is Table:
assert ('cannot be recovered in reading. ') in str(w[0].message)
else:
assert 'lost to non-astropy fits readers' in str(w[0].message)
with fits.open(filename) as ff:
hdu = ff[1]
assert 'TUNIT1' not in hdu.header
def test_memmap(self, tmpdir):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, memmap=False)
t3 = Table.read(filename, memmap=True)
assert equal_data(t2, t3)
# To avoid issues with --open-files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
@pytest.mark.parametrize('memmap', (False, True))
def test_character_as_bytes(self, tmpdir, memmap):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, character_as_bytes=False, memmap=memmap)
t3 = Table.read(filename, character_as_bytes=True, memmap=memmap)
assert t2['b'].dtype.kind == 'U'
assert t3['b'].dtype.kind == 'S'
assert equal_data(t2, t3)
# To avoid issues with --open-files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
def test_oned_single_element(self, tmpdir):
filename = str(tmpdir.join('test_oned_single_element.fits'))
table = Table({'x': [[1], [2]]})
table.write(filename, overwrite=True)
read = Table.read(filename)
assert read['x'].shape == (2, 1)
assert len(read['x'][0]) == 1
def test_write_append(self, tmpdir):
t = Table(self.data)
hdu = table_to_hdu(t)
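        # check_equal verifies that the file contains `expected` HDUs and that
        # every table HDU from `start_from` onward matches the reference `hdu`
        # above (both header and data).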
def check_equal(filename, expected, start_from=1):
with fits.open(filename) as hdu_list:
assert len(hdu_list) == expected
for hdu_table in hdu_list[start_from:]:
assert hdu_table.header == hdu.header
assert np.all(hdu_table.data == hdu.data)
filename = str(tmpdir.join('test_write_append.fits'))
t.write(filename, append=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Check the overwrite works correctly.
t.write(filename, append=True, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3)
# Normal write, check it's not appending.
t.write(filename, overwrite=True)
t.write(filename, overwrite=True)
check_equal(filename, 2)
# Now write followed by append, with different shaped tables.
t2 = Table(np.array([1, 2]))
t2.write(filename, overwrite=True)
t.write(filename, append=True)
check_equal(filename, 3, start_from=2)
assert equal_data(t2, Table.read(filename, hdu=1))
def test_write_overwrite(self, tmpdir):
t = Table(self.data)
filename = str(tmpdir.join('test_write_overwrite.fits'))
t.write(filename)
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename)
t.write(filename, overwrite=True)
def test_mask_nans_on_read(self, tmpdir):
filename = str(tmpdir.join('test_inexact_format_parse_on_read.fits'))
c1 = fits.Column(name='a', array=np.array([1, 2, np.nan]), format='E')
table_hdu = fits.TableHDU.from_columns([c1])
table_hdu.writeto(filename)
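        # With the default mask_invalid=True, NaN values come back masked.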
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
tab = Table.read(filename, mask_invalid=False)
assert tab.mask is None
        # Using memmap also deactivates the masking.
tab = Table.read(filename, memmap=True)
assert tab.mask is None
def test_mask_null_on_read(self, tmpdir):
filename = str(tmpdir.join('test_null_format_parse_on_read.fits'))
col = fits.Column(name='a', array=np.array([1, 2, 99, 60000], dtype='u2'),
format='I', null=99, bzero=32768)
bin_table_hdu = fits.BinTableHDU.from_columns([col])
bin_table_hdu.writeto(filename, overwrite=True)
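        # The element equal to the TNULL value (99) should come back masked.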
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
def test_mask_str_on_read(self, tmpdir):
filename = str(tmpdir.join('test_null_format_parse_on_read.fits'))
col = fits.Column(name='a', array=np.array([b'foo', b'bar', b''], dtype='|S3'),
format='A3')
bin_table_hdu = fits.BinTableHDU.from_columns([col])
bin_table_hdu.writeto(filename, overwrite=True)
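        # Empty strings in a string column are treated as missing values and
        # masked on read.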
tab = Table.read(filename)
assert any(tab.mask)
assert tab.mask[2]
tab = Table.read(filename, mask_invalid=False)
assert tab.mask is None
class TestMultipleHDU:
def setup_class(self):
self.data1 = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'U1'), ('c', float)])
self.data2 = np.array(list(zip([1.4, 2.3, 3.2, 4.7],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('p', float), ('q', float)])
self.data3 = np.array(list(zip([1, 2, 3, 4],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('A', int), ('B', float)])
hdu0 = PrimaryHDU()
hdu1 = BinTableHDU(self.data1, name='first')
hdu2 = BinTableHDU(self.data2, name='second')
hdu3 = ImageHDU(np.ones((3, 3)), name='third')
hdu4 = BinTableHDU(self.data3)
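        # Several HDU lists used by the tests below:
        #   hdus  : primary, two named tables, an image, and an unnamed table
        #   hdusb : image before the tables, so the first table is at hdu=2
        #   hdus3 : image followed by a single table (at hdu=2)
        #   hdus2 : a single table (hdu=1) followed by an image
        #   hdus1 : a single table only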
self.hdus = HDUList([hdu0, hdu1, hdu2, hdu3, hdu4])
self.hdusb = HDUList([hdu0, hdu3, hdu2, hdu1])
self.hdus3 = HDUList([hdu0, hdu3, hdu2])
self.hdus2 = HDUList([hdu0, hdu1, hdu3])
self.hdus1 = HDUList([hdu0, hdu1])
def teardown_class(self):
del self.hdus
def setup_method(self, method):
warnings.filterwarnings('always')
def test_read(self, tmpdir):
filename = str(tmpdir.join('test_read.fits'))
self.hdus.writeto(filename)
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)"):
t = Table.read(filename)
assert equal_data(t, self.data1)
filename = str(tmpdir.join('test_read_2.fits'))
self.hdusb.writeto(filename)
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)"):
t3 = Table.read(filename)
assert equal_data(t3, self.data2)
def test_read_with_hdu_0(self, tmpdir):
filename = str(tmpdir.join('test_read_with_hdu_0.fits'))
self.hdus.writeto(filename)
with pytest.raises(ValueError) as exc:
Table.read(filename, hdu=0)
assert exc.value.args[0] == 'No table found in hdu=0'
@pytest.mark.parametrize('hdu', [1, 'first'])
def test_read_with_hdu_1(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_1.fits'))
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [2, 'second'])
def test_read_with_hdu_2(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_2.fits'))
self.hdus.writeto(filename)
t = Table.read(filename, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize('hdu', [3, 'third'])
def test_read_with_hdu_3(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_3.fits'))
self.hdus.writeto(filename)
with pytest.raises(ValueError, match='No table found in hdu=3'):
Table.read(filename, hdu=hdu)
def test_read_with_hdu_4(self, tmpdir):
filename = str(tmpdir.join('test_read_with_hdu_4.fits'))
self.hdus.writeto(filename)
t = Table.read(filename, hdu=4)
assert equal_data(t, self.data3)
@pytest.mark.parametrize('hdu', [2, 3, '1', 'second', ''])
def test_read_with_hdu_missing(self, tmpdir, hdu):
filename = str(tmpdir.join('test_warn_with_hdu_1.fits'))
self.hdus1.writeto(filename)
with pytest.warns(AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)"):
t1 = Table.read(filename, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize('hdu', [0, 2, 'third'])
def test_read_with_hdu_warning(self, tmpdir, hdu):
filename = str(tmpdir.join('test_warn_with_hdu_2.fits'))
self.hdus2.writeto(filename)
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)"):
t2 = Table.read(filename, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize('hdu', [0, 1, 'third'])
def test_read_in_last_hdu(self, tmpdir, hdu):
filename = str(tmpdir.join('test_warn_with_hdu_3.fits'))
self.hdus3.writeto(filename)
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)"):
t3 = Table.read(filename, hdu=hdu)
assert equal_data(t3, self.data2)
def test_read_from_hdulist(self):
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=1\)"):
t = Table.read(self.hdus)
assert equal_data(t, self.data1)
with pytest.warns(AstropyUserWarning,
match=r"hdu= was not specified but multiple tables "
r"are present, reading in first available "
r"table \(hdu=2\)"):
t3 = Table.read(self.hdusb)
assert equal_data(t3, self.data2)
def test_read_from_hdulist_with_hdu_0(self):
with pytest.raises(ValueError) as exc:
Table.read(self.hdus, hdu=0)
assert exc.value.args[0] == 'No table found in hdu=0'
@pytest.mark.parametrize('hdu', [1, 'first', None])
def test_read_from_hdulist_with_single_table(self, hdu):
t = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [1, 'first'])
def test_read_from_hdulist_with_hdu_1(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [2, 'second'])
def test_read_from_hdulist_with_hdu_2(self, hdu):
t = Table.read(self.hdus, hdu=hdu)
assert equal_data(t, self.data2)
@pytest.mark.parametrize('hdu', [3, 'third'])
def test_read_from_hdulist_with_hdu_3(self, hdu):
with pytest.raises(ValueError, match='No table found in hdu=3'):
Table.read(self.hdus, hdu=hdu)
@pytest.mark.parametrize('hdu', [0, 2, 'third'])
def test_read_from_hdulist_with_hdu_warning(self, hdu):
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=1\)"):
t2 = Table.read(self.hdus2, hdu=hdu)
assert equal_data(t2, self.data1)
@pytest.mark.parametrize('hdu', [2, 3, '1', 'second', ''])
def test_read_from_hdulist_with_hdu_missing(self, hdu):
with pytest.warns(AstropyDeprecationWarning,
match=rf"Specified hdu={hdu} not found, "
r"reading in first available table \(hdu=1\)"):
t1 = Table.read(self.hdus1, hdu=hdu)
assert equal_data(t1, self.data1)
@pytest.mark.parametrize('hdu', [0, 1, 'third'])
def test_read_from_hdulist_in_last_hdu(self, hdu):
with pytest.warns(AstropyDeprecationWarning,
match=rf"No table found in specified hdu={hdu}, "
r"reading in first available table \(hdu=2\)"):
t3 = Table.read(self.hdus3, hdu=hdu)
assert equal_data(t3, self.data2)
@pytest.mark.parametrize('hdu', [None, 1, 'first'])
def test_read_from_single_hdu(self, hdu):
t = Table.read(self.hdus[1])
assert equal_data(t, self.data1)
def test_masking_regression_1795():
"""
Regression test for #1795 - this bug originally caused columns where TNULL
was not defined to have their first element masked.
"""
t = Table.read(get_pkg_data_filename('data/tb.fits'))
assert np.all(t['c1'].mask == np.array([False, False]))
assert not hasattr(t['c2'], 'mask')
assert not hasattr(t['c3'], 'mask')
assert not hasattr(t['c4'], 'mask')
assert np.all(t['c1'].data == np.array([1, 2]))
assert np.all(t['c2'].data == np.array([b'abc', b'xy ']))
assert_allclose(t['c3'].data, np.array([3.70000007153, 6.6999997139]))
assert np.all(t['c4'].data == np.array([False, True]))
def test_scale_error():
a = [1, 4, 5]
b = [2.0, 5.0, 8.2]
c = ['x', 'y', 'z']
t = Table([a, b, c], names=('a', 'b', 'c'), meta={'name': 'first table'})
t['a'].unit = '1.2'
with pytest.raises(UnitScaleError, match=r"The column 'a' could not be "
r"stored in FITS format because it has a scale '\(1\.2\)'"
r" that is not recognized by the FITS standard\. Either "
r"scale the data or change the units\."):
t.write('t.fits', format='fits', overwrite=True)
@pytest.mark.parametrize('tdisp_str, format_return',
[('EN10.5', ('EN', '10', '5', None)),
('F6.2', ('F', '6', '2', None)),
('B5.10', ('B', '5', '10', None)),
('E10.5E3', ('E', '10', '5', '3')),
('A21', ('A', '21', None, None))])
def test_parse_tdisp_format(tdisp_str, format_return):
assert _parse_tdisp_format(tdisp_str) == format_return
@pytest.mark.parametrize('tdisp_str, format_str_return',
[('G15.4E2', '{:15.4g}'),
('Z5.10', '{:5x}'),
('I6.5', '{:6d}'),
('L8', '{:>8}'),
('E20.7', '{:20.7e}')])
def test_fortran_to_python_format(tdisp_str, format_str_return):
assert _fortran_to_python_format(tdisp_str) == format_str_return
@pytest.mark.parametrize('fmt_str, tdisp_str',
[('{:3d}', 'I3'),
('3d', 'I3'),
('7.3f', 'F7.3'),
('{:>4}', 'A4'),
('{:7.4f}', 'F7.4'),
('%5.3g', 'G5.3'),
('%10s', 'A10'),
('%.4f', 'F13.4')])
def test_python_to_tdisp(fmt_str, tdisp_str):
assert python_to_tdisp(fmt_str) == tdisp_str
def test_logical_python_to_tdisp():
assert python_to_tdisp('{:>7}', logical_dtype=True) == 'L7'
def test_bool_column(tmpdir):
"""
Regression test for https://github.com/astropy/astropy/issues/1953
Ensures that Table columns of bools are properly written to a FITS table.
"""
arr = np.ones(5, dtype=bool)
    arr[::2] = np.False_
t = Table([arr])
t.write(str(tmpdir.join('test.fits')), overwrite=True)
with fits.open(str(tmpdir.join('test.fits'))) as hdul:
assert hdul[1].data['col0'].dtype == np.dtype('bool')
assert np.all(hdul[1].data['col0'] == arr)
def test_unicode_column(tmpdir):
"""
Test that a column of unicode strings is still written as one
byte-per-character in the FITS table (so long as the column can be ASCII
encoded).
Regression test for one of the issues fixed in
https://github.com/astropy/astropy/pull/4228
"""
t = Table([np.array(['a', 'b', 'cd'])])
t.write(str(tmpdir.join('test.fits')), overwrite=True)
with fits.open(str(tmpdir.join('test.fits'))) as hdul:
assert np.all(hdul[1].data['col0'] == ['a', 'b', 'cd'])
assert hdul[1].header['TFORM1'] == '2A'
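    # Non-ASCII characters cannot be encoded in a FITS character column, so
    # writing this table must fail.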
t2 = Table([np.array(['\N{SNOWMAN}'])])
with pytest.raises(UnicodeEncodeError):
t2.write(str(tmpdir.join('test.fits')), overwrite=True)
def test_unit_warnings_read_write(tmpdir):
filename = str(tmpdir.join('test_unit.fits'))
t1 = Table([[1, 2], [3, 4]], names=['a', 'b'])
t1['a'].unit = 'm/s'
t1['b'].unit = 'not-a-unit'
with pytest.warns(u.UnitsWarning, match="'not-a-unit' did not parse as fits unit") as w:
t1.write(filename, overwrite=True)
assert len(w) == 1
with pytest.warns(u.UnitsWarning, match="'not-a-unit' did not parse as fits unit") as w:
Table.read(filename, hdu=1)
def test_convert_comment_convention(tmpdir):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
filename = get_pkg_data_filename('data/stddata.fits')
with pytest.warns(AstropyUserWarning, match=r'hdu= was not specified but '
r'multiple tables are present'):
t = Table.read(filename)
assert t.meta['comments'] == [
'',
' *** End of mandatory fields ***',
'',
'',
' *** Column names ***',
'',
'',
' *** Column formats ***',
''
]
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta',
'info.dtype']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
        # Mixin info.meta can be None instead of an empty OrderedDict();
        # #6720 would fix this.
if attr == 'info.meta':
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-15)
elif isinstance(a1, np.dtype):
# FITS does not perfectly preserve dtype: byte order can change, and
# unicode gets stored as bytes. So, we just check safe casting, to
# ensure we do not, e.g., accidentally change integer to float, etc.
if NUMPY_LT_1_22 and a1.names:
# For old numpy, can_cast does not deal well with structured dtype.
assert a1.names == a2.names
else:
assert np.can_cast(a2, a1, casting='safe')
else:
assert np.all(a1 == a2)
def test_fits_mixins_qtable_to_table(tmpdir):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = str(tmpdir.join('test_simple.fits'))
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format='fits')
t2 = Table.read(filename, format='fits', astropy_native=True)
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_fits_mixins_as_one(table_cls, tmpdir):
"""Test write/read all cols at once and validate intermediate column names"""
filename = str(tmpdir.join('test_simple.fits'))
names = sorted(mixin_cols)
# FITS stores times directly, so we just get the column back.
all_serialized_names = []
for name in sorted(mixin_cols):
all_serialized_names.extend(
[name] if isinstance(mixin_cols[name], Time)
else serialized_names[name])
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="fits")
t2 = table_cls.read(filename, format='fits', astropy_native=True)
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['HISTORY'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
# Read directly via fits and confirm column names
with fits.open(filename) as hdus:
assert hdus[1].columns.names == all_serialized_names
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_fits_mixins_per_column(table_cls, name_col, tmpdir):
"""Test write/read one col at a time and do detailed validation"""
filename = str(tmpdir.join('test_simple.fits'))
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'my \n\n\n description'
t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
t.write(filename, format="fits")
t2 = table_cls.read(filename, format='fits', astropy_native=True)
if isinstance(col, Time):
# FITS Time does not preserve format
t2[name].format = col.format
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ['data'] if colname in ('c1', 'c2') else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.parametrize('name_col', unsupported_cols.items())
@pytest.mark.xfail(reason='column type unsupported')
def test_fits_unsupported_mixin(self, name_col, tmpdir):
    # Check that we actually fail when writing the unsupported columns defined
    # at module level above.
filename = str(tmpdir.join('test_simple.fits'))
name, col = name_col
Table([col], names=[name]).write(filename, format='fits')
def test_info_attributes_with_no_mixins(tmpdir):
"""Even if there are no mixin columns, if there is metadata that would be lost it still
gets serialized
"""
filename = str(tmpdir.join('test.fits'))
t = Table([[1.0, 2.0]])
t['col0'].description = 'hello' * 40
t['col0'].format = '{:8.4f}'
t['col0'].meta['a'] = {'b': 'c'}
t.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2['col0'].description == 'hello' * 40
assert t2['col0'].format == '{:8.4f}'
assert t2['col0'].meta['a'] == {'b': 'c'}
@pytest.mark.parametrize('method', ['set_cols', 'names', 'class'])
def test_round_trip_masked_table_serialize_mask(tmpdir, method):
"""
    Same as the previous test, but set serialize_method to 'data_mask' so the
    mask is written out and the behavior is fully correct.
"""
filename = str(tmpdir.join('test.fits'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
    # MaskedColumn but no masked elements.  See the MaskedColumnInfo class
    # _represent_as_dict() method in astropy.table for details on why we test
    # a column with no masked elements.
t['d'] = [1, 2, 3]
if method == 'set_cols':
for col in t.itercols():
col.info.serialize_method['fits'] = 'data_mask'
t.write(filename)
elif method == 'names':
t.write(filename, serialize_method={'a': 'data_mask', 'b': 'data_mask',
'c': 'data_mask', 'd': 'data_mask'})
elif method == 'class':
t.write(filename, serialize_method='data_mask')
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
def test_meta_not_modified(tmpdir):
filename = str(tmpdir.join('test.fits'))
t = Table(data=[Column([1, 2], 'a', description='spam')])
t.meta['comments'] = ['a', 'b']
assert len(t.meta) == 1
t.write(filename)
assert len(t.meta) == 1
assert t.meta['comments'] == ['a', 'b']
|
bcdd9962ce45a165a53b519b97cf85819fa78fcfc991fc62666f0529560ebbd8 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import math
import os
import re
import time
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy.io import fits
from astropy.io.fits.hdu.compressed import SUBTRACTIVE_DITHER_1, DITHER_SEED_CHECKSUM
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from .test_table import comparerecords
from . import FitsTestCase
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = fits.ImageHDU(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = fits.ImageHDU(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
assert hdu.ver == reference_ver
assert hdu.header['EXTVER'] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert 'EXTVER' not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
# Passing name to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr['EXTVER'] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr['FILENAME'] = 'labq01i3q_rawtag.fits'
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert phdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
# Open a file read-only (the default mode), the content of the FITS
# file are read into memory.
r = fits.open(self.data('test0.fits')) # readonly
        # Data sections are lazily instantiated, so if we close the HDUList
        # without touching the data, the data can no longer be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
def test_open_2(self):
r = fits.open(self.data('test0.fits'))
info = ([(0, 'PRIMARY', 1, 'PrimaryHDU', 138, (), '', '')] +
[(x, 'SCI', x, 'ImageHDU', 61, (40, 40), 'int16', '')
for x in range(1, 5)])
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data('test0.fits'))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
# Test that HDUs can be accessed with lazy_load_hdus=False
r = fits.open(self.data('test0.fits'), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
assert str(exc_info.value) == 'list index out of range'
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data('test0.fits'))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
fits.conf.lazy_load_hdus = True
def test_fortran_array(self):
# Test that files are being correctly written+read for "C" and "F" order arrays
a = np.arange(21).reshape(3, 7)
b = np.asfortranarray(a)
afits = self.temp('a_str.fits')
bfits = self.temp('b_str.fits')
# writing to str specified files
fits.PrimaryHDU(data=a).writeto(afits)
fits.PrimaryHDU(data=b).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a)
np.testing.assert_array_equal(fits.getdata(bfits), a)
# writing to fileobjs
aafits = self.temp('a_fileobj.fits')
bbfits = self.temp('b_fileobj.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a)
np.testing.assert_array_equal(fits.getdata(bbfits), a)
def test_fortran_array_non_contiguous(self):
# Test that files are being correctly written+read for 'C' and 'F' order arrays
a = np.arange(105).reshape(3, 5, 7)
b = np.asfortranarray(a)
# writing to str specified files
afits = self.temp('a_str_slice.fits')
bfits = self.temp('b_str_slice.fits')
fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits)
fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits)
np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2])
# writing to fileobjs
aafits = self.temp('a_fileobj_slice.fits')
bbfits = self.temp('b_fileobj_slice.fits')
with open(aafits, mode='wb') as fd:
fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd)
with open(bbfits, mode='wb') as fd:
fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd)
np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2])
np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2])
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([('EXTNAME', 'XPRIMARY'), ('EXTVER', 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert 'EXTNAME' in hdul[0].header
assert hdul[0].name == 'XPRIMARY'
assert hdul[0].name == hdul[0].header['EXTNAME']
info = [(0, 'XPRIMARY', 1, 'PrimaryHDU', 5, (), '', '')]
assert hdul.info(output=False) == info
assert hdul['PRIMARY'] is hdul['XPRIMARY']
assert hdul['PRIMARY'] is hdul[('XPRIMARY', 1)]
hdul[0].name = 'XPRIMARY2'
assert hdul[0].header['EXTNAME'] == 'XPRIMARY2'
hdul.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].name == 'XPRIMARY2'
def test_io_manipulation(self):
# Get a keyword value. An extension can be referred by name or by
# number. Both extension and keyword names are case insensitive.
with fits.open(self.data('test0.fits')) as r:
assert r['primary'].header['naxis'] == 0
assert r[0].header['naxis'] == 0
            # If there is more than one extension with the same EXTNAME value,
            # EXTVER can be used (as the second argument) to distinguish
            # between them.
assert r['sci', 1].header['detector'] == 1
            # Append a new card (keyword assignment appends it if the keyword
            # is not already present).
r[0].header['xxx'] = 1.234e56
assert ('\n'.join(str(x) for x in r[0].header.cards[-3:]) ==
"EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 ")
# rename a keyword
r[0].header.rename_keyword('filename', 'fname')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'history')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'simple')
r[0].header.rename_keyword('fname', 'filename')
# get a subsection of data
assert np.array_equal(r[2].data[:3, :3],
np.array([[349, 349, 348],
[349, 349, 347],
[347, 350, 349]], dtype=np.int16))
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp('test_new.fits'), mode='append') as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
# The flush method will write the current HDUList object back
                # to the newly created file on disk. The HDUList remains open
                # and can be operated on further.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
                # When the file is closed, the most recent additions of
                # extension(s) since the last flush() will be appended, but any
                # HDU that already existed at the last flush will not be
                # modified.
del n
            # If an existing file is opened in "append" mode then, as in
            # readonly mode, the HDUs will be read into the HDUList; they can
            # be modified in memory but cannot be written back to the original
            # file. A file opened in append mode can only add new HDUs.
os.rename(self.temp('test_new.fits'),
self.temp('test_append.fits'))
with fits.open(self.temp('test_append.fits'), mode='append') as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp('test_append.fits'),
self.temp('test_update.fits'))
with fits.open(self.temp('test_update.fits'), mode='update') as u:
                # When the changes do not alter the size structure of the
                # original HDUList (or of its state at the last flush), the
                # changes are written back "in place".
assert u[0].header['rootname'] == 'U2EQ0201T'
u[0].header['rootname'] = 'abc'
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
                # If the changes affect the size structure (e.g. adding or
                # deleting HDUs, expanding or shrinking a header beyond its
                # existing number of 2880-byte blocks, or changing the data
                # size), the HDUList is written to a temporary file, the
                # original file is deleted, and the temporary file is renamed
                # to the original file name and reopened in update mode. To a
                # user these two kinds of write-back look the same, unless the
                # optional argument of flush or close is set to 1.
del u[2]
u.flush()
                # The writeto method of HDUList writes the current HDUList,
                # with all changes made up to now, to a new file. This method
                # works the same regardless of the mode the HDUList was opened
                # with.
u.append(r[3])
u.writeto(self.temp('test_new.fits'))
del u
        # Another useful HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data('test0.fits')) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name='SCI')
assert np.array_equal(hdu.data,
np.array([[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.]],
dtype=np.float32))
# create an HDU with header and data
        # notice that the header has the correct NAXIS keywords since it is
        # constructed with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2],
dtype='int32'))
assert ('\n'.join(str(x) for x in hdu2.header.cards[1:5]) ==
"BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters ")
def test_memory_mapping(self):
# memory mapping
f1 = fits.open(self.data('test0.fits'), memmap=1)
f1.close()
def test_verification_on_output(self):
# verification on output
        # make a defective HDUList first
x = fits.ImageHDU()
hdu = fits.HDUList(x) # HDUList can take a list or one single HDU
with pytest.warns(AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\.") as w:
hdu.verify()
assert len(w) == 3
with pytest.warns(AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\. "
r"Fixed by inserting one as 0th HDU\.") as w:
hdu.writeto(self.temp('test_new2.fits'), 'fix')
assert len(w) == 3
def test_section(self):
# section testing
fs = fits.open(self.data('arange.fits'))
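        # The .section interface reads only the requested part of the data
        # from disk; every result below should match plain indexing of the
        # full data array.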
assert np.array_equal(fs[0].section[3, 2, 5], 357)
assert np.array_equal(
fs[0].section[3, 2, :],
np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, 4:],
np.array([356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, :8],
np.array([352, 353, 354, 355, 356, 357, 358, 359]))
assert np.array_equal(fs[0].section[3, 2, -8:8],
np.array([355, 356, 357, 358, 359]))
assert np.array_equal(
fs[0].section[3, 2:5, :],
np.array([[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
[363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
[374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384]]))
assert np.array_equal(fs[0].section[3, :, :][:3, :3],
np.array([[330, 331, 332],
[341, 342, 343],
[352, 353, 354]]))
dat = fs[0].data
assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
assert np.array_equal(fs[0].section[3:6, :, :][:3, :3, :3],
np.array([[[330, 331, 332],
[341, 342, 343],
[352, 353, 354]],
[[440, 441, 442],
[451, 452, 453],
[462, 463, 464]],
[[550, 551, 552],
[561, 562, 563],
[572, 573, 574]]]))
assert np.array_equal(fs[0].section[:, :, :][:3, :2, :2],
np.array([[[0, 1],
[11, 12]],
[[110, 111],
[121, 122]],
[[220, 221],
[231, 232]]]))
assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
assert np.array_equal(fs[0].section[:, [1, 2, 4], 3],
dat[:, [1, 2, 4], 3])
bool_index = np.array([True, False, True, True, False,
False, True, True, False, True])
assert np.array_equal(fs[0].section[:, bool_index, :],
dat[:, bool_index, :])
assert np.array_equal(
fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
assert np.array_equal(fs[0].section[..., [1, 2, 4], 3],
dat[..., [1, 2, 4], 3])
fs.close()
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
hdul.close()
def test_section_data_square(self):
a = np.arange(4).reshape(2, 2)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
def test_section_data_cube(self):
a = np.arange(18).reshape(2, 3, 3)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:] == dat[:]).all()
assert (d.section[:, :] == dat[:, :]).all()
# Test that various combinations of indexing on the section are equal to
# indexing the data.
# Testing all combinations of scalar-index and [:] for each dimension.
for idx1 in [slice(None), 0, 1]:
for idx2 in [slice(None), 0, 1, 2]:
for idx3 in [slice(None), 0, 1, 2]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test all ways to slice the last dimension but keeping the first two.
for idx3 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3), slice(2, 3)]:
nd_idx = (slice(None), slice(None), idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
# Test various combinations (not exhaustive) to slice all dimensions.
for idx1 in [slice(0, 1), slice(1, 2)]:
for idx2 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3)]:
for idx3 in [slice(0, 1), slice(0, 2), slice(0, 3),
slice(1, 2), slice(1, 3), slice(2, 3)]:
nd_idx = (idx1, idx2, idx3)
assert (d.section[nd_idx] == dat[nd_idx]).all()
hdul.close()
def test_section_data_four(self):
a = np.arange(256).reshape(4, 4, 4, 4)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
hdul.close()
def test_section_data_scaled(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143
This is like test_section_data_square but uses a file containing scaled
image data, to test that sections can work correctly with scaled data.
"""
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
hdul.close()
# Test without having accessed the full data first
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
assert not d._data_loaded
hdul.close()
def test_do_not_scale_image_data(self):
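        # With do_not_scale_image_data=True the raw stored integers are
        # returned; by default BSCALE/BZERO are applied and the data comes
        # back as float32.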
with fits.open(self.data('scale.fits'), do_not_scale_image_data=True) as hdul:
assert hdul[0].data.dtype == np.dtype('>i2')
with fits.open(self.data('scale.fits')) as hdul:
assert hdul[0].data.dtype == np.dtype('float32')
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
fits.writeto(self.temp('test_new.fits'), data=np.array([],
dtype='uint8'))
d = np.zeros([100, 100]).astype('uint16')
fits.append(self.temp('test_new.fits'), data=d)
with fits.open(self.temp('test_new.fits'), uint=True) as f:
assert f[1].data.dtype == 'uint16'
def test_scale_with_explicit_bzero_bscale(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6399
"""
hdu2 = fits.ImageHDU(np.random.rand(100, 100))
# The line below raised an exception in astropy 2.0, so if it does not
# raise an error here, that is progress.
hdu2.scale(type='uint8', bscale=1, bzero=0)
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2 ** int_size) - 1
if int_size == 64:
                max_uint = np.uint64(max_uint)
dtype = f'uint{int_size}'
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == f'uint{int_size}'
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
filename = f'uint{int_size}.fits'
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == f'uint{int_size}'
assert 'BZERO' in new_uint_hdu.header
assert new_uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
@pytest.mark.parametrize(('from_file'), (False, True))
@pytest.mark.parametrize(('do_not_scale'), (False,))
def test_uint_header_keywords_removed_after_bitpix_change(self,
from_file,
do_not_scale):
"""
Regression test for https://github.com/astropy/astropy/issues/4974
BZERO/BSCALE should be removed if data is converted to a floating
point type.
Currently excluding the case where do_not_scale_image_data=True
because it is not clear what the expectation should be.
"""
arr = np.zeros(100, dtype='uint16')
if from_file:
# To generate the proper input file we always want to scale the
            # data before writing it... otherwise, when we open it, it will be
            # regular (signed) int data.
tmp_uint = fits.PrimaryHDU(arr)
filename = 'unsigned_int.fits'
tmp_uint.writeto(self.temp(filename))
with fits.open(self.temp(filename),
do_not_scale_image_data=do_not_scale) as f:
uint_hdu = f[0]
# Force a read before we close.
_ = uint_hdu.data
else:
uint_hdu = fits.PrimaryHDU(arr,
do_not_scale_image_data=do_not_scale)
# Make sure appropriate keywords are in the header. See
# https://github.com/astropy/astropy/pull/3916#issuecomment-122414532
# for discussion.
assert 'BSCALE' in uint_hdu.header
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BSCALE'] == 1
assert uint_hdu.header['BZERO'] == 32768
# Convert data to floating point...
uint_hdu.data = uint_hdu.data * 1.0
# ...bitpix should be negative.
assert uint_hdu.header['BITPIX'] < 0
# BSCALE and BZERO should NOT be in header any more.
assert 'BSCALE' not in uint_hdu.header
assert 'BZERO' not in uint_hdu.header
# This is the main test...the data values should round trip
# as zero.
filename = 'test_uint_to_float.fits'
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert (hdul[0].data == 0).all()
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
        NaNs in the data array).
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header['BLANK'] = 999
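        # Because BLANK is set, integer values equal to 999 should be read
        # back as NaN (the data is converted to float on read).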
hdu.writeto(self.temp('test_new.fits'))
with fits.open(self.temp('test_new.fits')) as hdul:
assert np.isnan(hdul[1].data[1]).all()
def test_invalid_blanks(self):
"""
Test that invalid use of the BLANK keyword leads to an appropriate
warning, and that the BLANK keyword is ignored when returning the
HDU data.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
arr = np.arange(5, dtype=np.float64)
hdu = fits.PrimaryHDU(data=arr)
hdu.header['BLANK'] = 2
with pytest.warns(AstropyUserWarning, match="Invalid 'BLANK' keyword in header") as w:
hdu.writeto(self.temp('test_new.fits'))
# Allow the HDU to be written, but there should be a warning
            # when writing a header with BLANK when the data is not
            # integer-typed.
assert len(w) == 1
# Should also get a warning when opening the file, and the BLANK
# value should not be applied
with pytest.warns(AstropyUserWarning, match="Invalid 'BLANK' keyword in header") as w:
with fits.open(self.temp('test_new.fits')) as h:
assert np.all(arr == h[0].data)
assert len(w) == 1
@pytest.mark.filterwarnings("ignore:Invalid 'BLANK' keyword in header")
def test_scale_back_with_blanks(self):
"""
Test that when auto-rescaling integer data with "blank" values (where
the blanks are replaced by NaN in the float data), that the "BLANK"
keyword is removed from the header.
Further, test that when using the ``scale_back=True`` option the blank
values are restored properly.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
# Make the sample file
arr = np.arange(5, dtype=np.int32)
hdu = fits.PrimaryHDU(data=arr)
hdu.scale('int16', bscale=1.23)
# Creating data that uses BLANK is currently kludgy--a separate issue
# TODO: Rewrite this test when scaling with blank support is better
# supported
# Let's just add a value to the data that should be converted to NaN
# when it is read back in:
filename = self.temp('test.fits')
hdu.data[0] = 9999
hdu.header['BLANK'] = 9999
hdu.writeto(filename)
with fits.open(filename) as hdul:
data = hdul[0].data
assert np.isnan(data[0])
with pytest.warns(fits.verify.VerifyWarning,
match=r"Invalid 'BLANK' keyword in header"):
hdul.writeto(self.temp('test2.fits'))
# Now reopen the newly written file. It should not have a 'BLANK'
# keyword
with fits.open(self.temp('test2.fits')) as hdul2:
assert 'BLANK' not in hdul2[0].header
data = hdul2[0].data
assert np.isnan(data[0])
# Finally, test that scale_back keeps the BLANKs correctly
with fits.open(filename, scale_back=True,
mode='update') as hdul3:
data = hdul3[0].data
            # This emits a warning that pytest cannot catch properly, so we
# catch it with pytest.mark.filterwarnings above.
assert np.isnan(data[0])
with fits.open(filename,
do_not_scale_image_data=True) as hdul4:
assert hdul4[0].header['BLANK'] == 9999
assert hdul4[0].header['BSCALE'] == 1.23
assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header['BZERO'] = 1.0
hdu.writeto(self.temp('test_new.fits'))
with fits.open(self.temp('test_new.fits')) as hdul:
arr += 1
assert (hdul[1].data == arr).all()
def test_rewriting_large_scaled_image(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101
"""
hdul = fits.open(self.data('fixed-1890.fits'))
orig_data = hdul[0].data
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.data('fixed-1890.fits'))
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[0].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
Replacing the original header to an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
self.copy_file('test0.fits')
with fits.open(self.temp('test0.fits'), mode='update') as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy['NAXIS*']
hdul[1].header = hdr_copy
with fits.open(self.temp('test0.fits')) as hdul:
assert (orig_data == hdul[1].data).all()
def test_open_scaled_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
(Don't update scaled image data if the data is not read)
This ensures that merely opening and closing a file containing scaled
image data does not cause any change to the data (or the header).
Changes should only occur if the data is accessed.
"""
# Copy the original file before making any possible changes to it
self.copy_file('scale.fits')
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
orig_data = hdul[0].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
assert (orig_data == hdul[0].data).all()
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[0].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[0].shape == (42, 10)
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
hdul.close()
def test_scale_back(self):
"""A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120
The scale_back feature for image HDUs.
"""
self.copy_file('scale.fits')
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[0].header['BITPIX']
orig_bzero = hdul[0].header['BZERO']
orig_bscale = hdul[0].header['BSCALE']
orig_data = hdul[0].data.copy()
hdul[0].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[0].header['BITPIX'] == orig_bitpix
assert hdul[0].header['BZERO'] == orig_bzero
assert hdul[0].header['BSCALE'] == orig_bscale
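            # A physical value of 0 scales back to the stored integer
            # floor(-BZERO / BSCALE).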
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[0].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('test0.fits')) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].data is None
assert h[1].header['NAXIS'] == 0
assert 'NAXIS1' not in h[1].header
assert 'NAXIS2' not in h[1].header
def test_invalid_blank(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2711
If the BLANK keyword contains an invalid value it should be ignored for
any calculations (though a warning should be issued).
"""
data = np.arange(100, dtype=np.float64)
hdu = fits.PrimaryHDU(data)
hdu.header['BLANK'] = 'nan'
with pytest.warns(fits.verify.VerifyWarning, match=r"Invalid value for "
r"'BLANK' keyword in header: 'nan'"):
hdu.writeto(self.temp('test.fits'))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[0].data == data)
assert len(w) == 2
msg = "Invalid value for 'BLANK' keyword in header"
assert msg in str(w[0].message)
msg = "Invalid 'BLANK' keyword"
assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
file_data = f.read()
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as (hdu,):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
def test_hdu_creation_with_scalar(self):
msg = r'data object array\(1\) should have at least one dimension'
with pytest.raises(TypeError, match=msg):
fits.ImageHDU(data=1)
with pytest.raises(TypeError, match=msg):
fits.PrimaryHDU(data=1)
class TestCompressedImage(FitsTestCase):
def test_empty(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2595
"""
hdu = fits.CompImageHDU()
assert hdu.data is None
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode='update') as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert hdul[1].data is None
# Now test replacing the empty data with an array and see what
# happens
hdul[1].data = np.arange(100, dtype=np.int32)
with fits.open(self.temp('test.fits')) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
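    # Round-trip representative float arrays through each supported
    # compression algorithm, in both little- and big-endian byte order.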
@pytest.mark.parametrize(
('data', 'compression_type', 'quantize_level'),
[(np.zeros((2, 10, 10), dtype=np.float32), 'RICE_1', 16),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_1', -0.01),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_2', -0.01),
(np.zeros((100, 100)) + 1, 'HCOMPRESS_1', 16),
(np.zeros((10, 10)), 'PLIO_1', 16)])
@pytest.mark.parametrize('byte_order', ['<', '>'])
def test_comp_image(self, data, compression_type, quantize_level,
byte_order):
data = data.newbyteorder(byte_order)
primary_hdu = fits.PrimaryHDU()
ofd = fits.HDUList(primary_hdu)
chdu = fits.CompImageHDU(data, name='SCI',
compression_type=compression_type,
quantize_level=quantize_level)
ofd.append(chdu)
ofd.writeto(self.temp('test_new.fits'), overwrite=True)
ofd.close()
with fits.open(self.temp('test_new.fits')) as fd:
assert (fd[1].data == data).all()
assert fd[1].header['NAXIS'] == chdu.header['NAXIS']
assert fd[1].header['NAXIS1'] == chdu.header['NAXIS1']
assert fd[1].header['NAXIS2'] == chdu.header['NAXIS2']
assert fd[1].header['BITPIX'] == chdu.header['BITPIX']
@pytest.mark.skipif('not HAS_SCIPY')
def test_comp_image_quantize_level(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5969
Test that quantize_level is used.
"""
import scipy.misc
np.random.seed(42)
data = scipy.misc.ascent() + np.random.randn(512, 512)*10
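        # A negative quantize_level requests a fixed absolute quantization
        # step (|quantize_level| in data units) rather than one scaled to the
        # per-tile noise, so the maximum round-trip error should be roughly
        # half the step size: ~0.5 for -1 and ~50 for -100 (checked below).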
fits.ImageHDU(data).writeto(self.temp('im1.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-1, dither_seed=5)\
.writeto(self.temp('im2.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-100, dither_seed=5)\
.writeto(self.temp('im3.fits'))
im1 = fits.getdata(self.temp('im1.fits'))
im2 = fits.getdata(self.temp('im2.fits'))
im3 = fits.getdata(self.temp('im3.fits'))
assert not np.array_equal(im2, im3)
assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3)
assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3)
assert np.isclose(np.min(im1 - im3), -50, atol=1e-1)
assert np.isclose(np.max(im1 - im3), 50, atol=1e-1)
def test_comp_image_hcompression_1_invalid_data(self):
"""
        Tests that compression with the HCOMPRESS_1 algorithm raises an error
        when the data is not 2D and the tile size cannot be reduced to 2D.
"""
pytest.raises(ValueError, fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32), name='SCI',
compression_type='HCOMPRESS_1', quantize_level=16,
tile_size=[2, 10, 10])
def test_comp_image_hcompress_image_stack(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
Tests that data containing more than two dimensions can be
compressed with HCOMPRESS_1 so long as the user-supplied tile size can
be flattened to two dimensions.
"""
cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10)
hdu = fits.CompImageHDU(data=cube, name='SCI',
compression_type='HCOMPRESS_1',
quantize_level=16, tile_size=[5, 5, 1])
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
# HCOMPRESSed images are allowed to deviate from the original by
# about 1/quantize_level of the RMS in each tile.
assert np.abs(hdul['SCI'].data - cube).max() < 1./15.
def test_subtractive_dither_seed(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/32
        Ensure that when floating point data is compressed with the
        SUBTRACTIVE_DITHER_1 quantization method, the correct ZDITHER0 seed
        is added to the header and the data can be correctly decompressed.
"""
array = np.arange(100.0).reshape(10, 10)
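        # With DITHER_SEED_CHECKSUM the ZDITHER0 seed is derived from a
        # checksum of the first tile of the data (with the default row-by-row
        # tiling, the first row); the next line reproduces that derivation
        # (sum of the tile's bytes modulo 10000, plus 1) so it can be compared
        # against the header value below.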
csum = (array[0].view('uint8').sum() % 10000) + 1
hdu = fits.CompImageHDU(data=array,
quantize_method=SUBTRACTIVE_DITHER_1,
dither_seed=DITHER_SEED_CHECKSUM)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
assert 'ZQUANTIZ' in hdul[1]._header
assert hdul[1]._header['ZQUANTIZ'] == 'SUBTRACTIVE_DITHER_1'
assert 'ZDITHER0' in hdul[1]._header
assert hdul[1]._header['ZDITHER0'] == csum
assert np.all(hdul[1].data == array)
def test_disable_image_compression(self):
with fits.open(self.data('comp.fits'),
disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data('comp.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
def test_open_comp_image_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
Similar to test_open_scaled_in_update_mode(), but specifically for
compressed images.
"""
# Copy the original file before making any possible changes to it
self.copy_file('comp.fits')
mtime = os.stat(self.temp('comp.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('comp.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('comp.fits')).st_mtime
@pytest.mark.slow
def test_open_scaled_in_update_mode_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
Identical to test_open_scaled_in_update_mode() but with a compressed
version of the scaled image.
"""
# Copy+compress the original file before making any possible changes to
# it
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
hdul[1].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
# Try reshaping the data, then closing and reopening the file; let's
# see if all the changes are preserved properly
hdul[1].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[1].shape == (42, 10)
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
hdul.close()
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(hdul1[1].compressed_data,
hdul2[1].compressed_data)
def test_rewriting_large_scaled_image_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
Identical to test_rewriting_large_scaled_image() but with a compressed
image.
"""
with fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('fixed-1890-z.fits'))
hdul = fits.open(self.temp('fixed-1890-z.fits'))
orig_data = hdul[1].data
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.temp('fixed-1890-z.fits'))
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.temp('fixed-1890-z.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[1].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
def test_scale_back_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
Identical to test_scale_back() but uses a compressed image.
"""
# Create a compressed version of the scaled image
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[1].header['BITPIX']
orig_bzero = hdul[1].header['BZERO']
orig_bscale = hdul[1].header['BSCALE']
orig_data = hdul[1].data.copy()
hdul[1].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[1].header['BITPIX'] == orig_bitpix
assert hdul[1].header['BZERO'] == orig_bzero
assert hdul[1].header['BSCALE'] == orig_bscale
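            # As in test_scale_back above: the physical value 0 scales back
            # to a stored value of floor(-BZERO / BSCALE).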
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[1].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[1].data[1:] == orig_data[1:]).all()
# Extra test to ensure that after everything the data is still the
# same as in the original uncompressed version of the image
with fits.open(self.data('scale.fits')) as hdul2:
# Recall we made the same modification to the data in hdul
# above
hdul2[0].data[0] = 0
assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
rng = np.random.default_rng(42)
noise = rng.normal(size=(20, 20))
chdu1 = fits.CompImageHDU(data=noise, compression_type='GZIP_1')
# First make a test image with lossy compression and make sure it
        # wasn't compressed perfectly.  This shouldn't ever happen, but just
        # to make sure the test is non-trivial.
chdu1.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert np.abs(noise - h[1].data).max() > 0.0
del h
chdu2 = fits.CompImageHDU(data=noise, compression_type='GZIP_1',
quantize_level=0.0) # No quantization
chdu2.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as h:
assert (noise == h[1].data).all()
def test_compression_column_tforms(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199"""
# Some interestingly tiled data so that some of it is quantized and
# some of it ends up just getting gzip-compressed
data2 = ((np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] +
np.arange(1, 7))
np.random.seed(1337)
data1 = np.random.uniform(size=(6 * 4, 7 * 4))
data1[:data2.shape[0], :data2.shape[1]] = data2
chdu = fits.CompImageHDU(data1, compression_type='RICE_1',
tile_size=(6, 7))
chdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'),
disable_image_compression=True) as h:
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM1'])
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM2'])
def test_compression_update_header(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/23
"""
self.copy_file('comp.fits')
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
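            # hdul[1].header is the wrapped image header while ._header is
            # the underlying binary table header; updates made through either
            # one should survive a round trip to disk.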
hdul[1].header['test1'] = 'test'
hdul[1]._header['test2'] = 'test2'
with fits.open(self.temp('comp.fits')) as hdul:
assert 'test1' in hdul[1].header
assert hdul[1].header['test1'] == 'test'
assert 'test2' in hdul[1].header
assert hdul[1].header['test2'] == 'test2'
# Test update via index now:
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
hdr[hdr.index('TEST1')] = 'foo'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['TEST1'] == 'foo'
# Test slice updates
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header['TEST*'] = 'qux'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['qux', 'qux']
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
idx = hdr.index('TEST1')
hdr[idx:idx + 2] = 'bar'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['bar', 'bar']
# Test updating a specific COMMENT card duplicate
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header[('COMMENT', 1)] = 'I am fire. I am death!'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['COMMENT'][1] == 'I am fire. I am death!'
assert hdul[1]._header['COMMENT'][1] == 'I am fire. I am death!'
# Test deleting by keyword and by slice
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
del hdr['COMMENT']
idx = hdr.index('TEST1')
del hdr[idx:idx + 2]
with fits.open(self.temp('comp.fits')) as hdul:
assert 'COMMENT' not in hdul[1].header
assert 'COMMENT' not in hdul[1]._header
assert 'TEST1' not in hdul[1].header
assert 'TEST1' not in hdul[1]._header
assert 'TEST2' not in hdul[1].header
assert 'TEST2' not in hdul[1]._header
def test_compression_update_header_with_reserved(self):
"""
Ensure that setting reserved keywords related to the table data
structure on CompImageHDU image headers fails.
"""
def test_set_keyword(hdr, keyword, value):
with pytest.warns(UserWarning) as w:
hdr[keyword] = value
assert len(w) == 1
assert str(w[0].message).startswith(
f"Keyword {keyword!r} is reserved")
assert keyword not in hdr
with fits.open(self.data('comp.fits')) as hdul:
hdr = hdul[1].header
test_set_keyword(hdr, 'TFIELDS', 8)
test_set_keyword(hdr, 'TTYPE1', 'Foo')
test_set_keyword(hdr, 'ZCMPTYPE', 'ASDF')
test_set_keyword(hdr, 'ZVAL1', 'Foo')
def test_compression_header_append(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.append('TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
imghdr.append(('FOO', 'bar', 'qux'), end=True)
assert 'FOO' in imghdr
assert imghdr[-1] == 'bar'
assert 'FOO' in tblhdr
assert tblhdr[-1] == 'bar'
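        # CHECKSUM set on the image header is remapped to ZHECKSUM in the
        # underlying table header rather than being stored directly.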
imghdr.append(('CHECKSUM', 'abcd1234'))
assert 'CHECKSUM' in imghdr
assert imghdr['CHECKSUM'] == 'abcd1234'
assert 'CHECKSUM' not in tblhdr
assert 'ZHECKSUM' in tblhdr
assert tblhdr['ZHECKSUM'] == 'abcd1234'
def test_compression_header_append2(self):
"""
Regression test for issue https://github.com/astropy/astropy/issues/5827
"""
with fits.open(self.data('comp.fits')) as hdul:
header = hdul[1].header
while (len(header) < 1000):
header.append() # pad with grow room
# Append stats to header:
header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean"))
header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev"))
header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median"))
def test_compression_header_insert(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
# First try inserting a restricted keyword
with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w:
imghdr.insert(1000, 'TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
assert tblhdr.count('TFIELDS') == 1
            # Next try a keyword-relative insert
imghdr.insert('TELESCOP', ('OBSERVER', 'Phil Plait'))
assert 'OBSERVER' in imghdr
assert imghdr.index('OBSERVER') == imghdr.index('TELESCOP') - 1
assert 'OBSERVER' in tblhdr
assert tblhdr.index('OBSERVER') == tblhdr.index('TELESCOP') - 1
# Next let's see if an index-relative insert winds up being
# sensible
idx = imghdr.index('OBSERVER')
imghdr.insert('OBSERVER', ('FOO',))
assert 'FOO' in imghdr
assert imghdr.index('FOO') == idx
assert 'FOO' in tblhdr
assert tblhdr.index('FOO') == tblhdr.index('OBSERVER') - 1
def test_compression_header_set_before_after(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with pytest.warns(UserWarning, match="Keyword 'ZBITPIX' is reserved ") as w:
imghdr.set('ZBITPIX', 77, 'asdf', after='XTENSION')
assert len(w) == 1
assert 'ZBITPIX' not in imghdr
assert tblhdr.count('ZBITPIX') == 1
assert tblhdr['ZBITPIX'] != 77
# Move GCOUNT before PCOUNT (not that there's any reason you'd
# *want* to do that, but it's just a test...)
imghdr.set('GCOUNT', 99, before='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') - 1
assert imghdr['GCOUNT'] == 99
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') - 1
assert tblhdr['ZGCOUNT'] == 99
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
imghdr.set('GCOUNT', 2, after='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') + 1
assert imghdr['GCOUNT'] == 2
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') + 1
assert tblhdr['ZGCOUNT'] == 2
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
def test_compression_header_append_commentary(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2363
"""
hdu = fits.CompImageHDU(np.array([0], dtype=np.int32))
hdu.header['COMMENT'] = 'hello world'
assert hdu.header['COMMENT'] == ['hello world']
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['COMMENT'] == ['hello world']
def test_compression_with_gzip_column(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/71
"""
arr = np.zeros((2, 7000), dtype='float32')
# The first row (which will be the first compressed tile) has a very
# wide range of values that will be difficult to quantize, and should
# result in use of a GZIP_COMPRESSED_DATA column
arr[0] = np.linspace(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(('ZTENSION', 'IMAGE'))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count('ZTENSION') == 2
def test_scale_bzero_with_compressed_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
and https://github.com/astropy/astropy/issues/4588
Identical to test_scale_bzero_with_int_data() but uses a compressed
image.
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.CompImageHDU(data=a.copy())
hdu2 = fits.CompImageHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_compressed_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Identical to test_scale_back_uint_assignment() but uses a compressed
image.
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.CompImageHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as hdul:
hdul[1].data[:] = 0
assert np.allclose(hdul[1].data, 0)
def test_compressed_header_missing_znaxis(self):
a = np.arange(100, 200, dtype=np.uint16)
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZNAXIS')
with pytest.raises(KeyError):
comp_hdu.compressed_data
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZBITPIX')
with pytest.raises(KeyError):
comp_hdu.compressed_data
def test_compressed_header_double_extname(self):
"""Test that a double EXTNAME with one default value does not
mask the non-default value."""
with fits.open(self.data('double_ext.fits')) as hdul:
hdu = hdul[1]
# Raw header has 2 EXTNAME entries
indices = hdu._header._keyword_indices['EXTNAME']
assert len(indices) == 2
# The non-default name should be returned.
assert hdu.name == 'ccd00'
assert 'EXTNAME' in hdu.header
assert hdu.name == hdu.header['EXTNAME']
            # There should be 1 non-default EXTNAME entry.
indices = hdu.header._keyword_indices['EXTNAME']
assert len(indices) == 1
# Test header sync from property set.
new_name = 'NEW_NAME'
hdu.name = new_name
assert hdu.name == new_name
assert hdu.header['EXTNAME'] == new_name
assert hdu._header['EXTNAME'] == new_name
assert hdu._image_header['EXTNAME'] == new_name
# Check that setting the header will change the name property.
hdu.header['EXTNAME'] = 'NEW2'
assert hdu.name == 'NEW2'
hdul.writeto(self.temp('tmp.fits'), overwrite=True)
with fits.open(self.temp('tmp.fits')) as hdul1:
hdu1 = hdul1[1]
assert len(hdu1._header._keyword_indices['EXTNAME']) == 1
assert hdu1.name == 'NEW2'
            # Check that deleting EXTNAME and then setting the name works
            # properly.
del hdu.header['EXTNAME']
hdu.name = 'RE-ADDED'
assert hdu.name == 'RE-ADDED'
with pytest.raises(TypeError):
hdu.name = 42
def test_compressed_header_extname(self):
"""Test consistent EXTNAME / hdu name interaction."""
name = 'FOO'
hdu = fits.CompImageHDU(data=np.arange(10), name=name)
assert hdu._header['EXTNAME'] == name
assert hdu.header['EXTNAME'] == name
assert hdu.name == name
name = 'BAR'
hdu.name = name
assert hdu._header['EXTNAME'] == name
assert hdu.header['EXTNAME'] == name
assert hdu.name == name
assert len(hdu._header._keyword_indices['EXTNAME']) == 1
def test_compressed_header_minimal(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11694
Tests that CompImageHDU can be initialized with a Header that
contains few or no cards, and doesn't require specific cards
such as 'BITPIX' or 'NAXIS'.
"""
fits.CompImageHDU(data=np.arange(10), header=fits.Header())
header = fits.Header({'HELLO': 'world'})
hdu = fits.CompImageHDU(data=np.arange(10), header=header)
assert hdu.header['HELLO'] == 'world'
@pytest.mark.parametrize(
('keyword', 'dtype', 'expected'),
[('BSCALE', np.uint8, np.float32), ('BSCALE', np.int16, np.float32),
('BSCALE', np.int32, np.float64), ('BZERO', np.uint8, np.float32),
('BZERO', np.int16, np.float32), ('BZERO', np.int32, np.float64)])
def test_compressed_scaled_float(self, keyword, dtype, expected):
"""
        If BSCALE or BZERO is set to a floating-point value, the
        uncompressed image data should be floating-point.
https://github.com/astropy/astropy/pull/6492
Parameters
----------
keyword : `str`
Keyword to set to a floating-point value to trigger
floating-point pixels.
dtype : `numpy.dtype`
Type of original array.
expected : `numpy.dtype`
Expected type of uncompressed array.
"""
value = 1.23345 # A floating-point value
hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype))
hdu.header[keyword] = value
hdu.writeto(self.temp('test.fits'))
del hdu
with fits.open(self.temp('test.fits')) as hdu:
assert hdu[1].header[keyword] == value
assert hdu[1].data.dtype == expected
@pytest.mark.parametrize('dtype', (np.uint8, np.int16, np.uint16, np.int32,
np.uint32))
def test_compressed_integers(self, dtype):
"""Test that the various integer dtypes are correctly written and read.
Regression test for https://github.com/astropy/astropy/issues/9072
"""
mid = np.iinfo(dtype).max // 2
data = np.arange(mid-50, mid+50, dtype=dtype)
testfile = self.temp('test.fits')
hdu = fits.CompImageHDU(data=data)
hdu.writeto(testfile, overwrite=True)
new = fits.getdata(testfile)
np.testing.assert_array_equal(data, new)
def test_write_non_contiguous_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2150
"""
orig = np.arange(100, dtype=float).reshape((10, 10), order='f')
assert not orig.flags.contiguous
primary = fits.PrimaryHDU()
hdu = fits.CompImageHDU(orig)
hdulist = fits.HDUList([primary, hdu])
hdulist.writeto(self.temp('test.fits'))
actual = fits.getdata(self.temp('test.fits'))
assert_equal(orig, actual)
def test_slice_and_write_comp_hdu(self):
"""
Regression test for https://github.com/astropy/astropy/issues/9955
"""
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].data = hdul[1].data[:200, :100]
assert not hdul[1].data.flags.contiguous
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert_equal(hdul1[1].data[:200, :100], hdul2[1].data)
def test_comphdu_bscale(tmpdir):
"""
    Regression test for a bug that caused extensions using BZERO and BSCALE,
    once converted to CompImageHDU, to end up with the BZERO/BSCALE keywords
    placed before TFIELDS.
"""
filename1 = tmpdir.join('3hdus.fits').strpath
filename2 = tmpdir.join('3hdus_comp.fits').strpath
x = np.random.random((100, 100))*100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x-50, dtype=int), uint=True)
x1.header['BZERO'] = 20331
x1.header['BSCALE'] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
with fits.open(filename1) as hdus:
hdus[1] = fits.CompImageHDU(data=hdus[1].data.astype(np.uint32),
header=hdus[1].header)
hdus.writeto(filename2)
# open again and verify
with fits.open(filename2) as hdus:
hdus[1].verify('exception')
def test_scale_implicit_casting():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations.
hdu = fits.ImageHDU(np.array([1], dtype=np.int32))
hdu.scale(bzero=1.3)
def test_bzero_implicit_casting_compressed():
# Regression test for an issue that occurred because Numpy now does not
# allow implicit type casting during inplace operations. Astropy is
# actually not able to produce a file that triggers the failure - the
# issue occurs when using unsigned integer types in the FITS file, in which
# case BZERO should be 32768. But if the keyword is stored as 32768.0, then
# it was possible to trigger the implicit casting error.
filename = get_pkg_data_filename('data/compressed_float_bzero.fits')
with fits.open(filename) as hdul:
hdu = hdul[1]
hdu.data
def test_bzero_mishandled_info(tmpdir):
# Regression test for #5507:
# Calling HDUList.info() on a dataset which applies a zeropoint
# from BZERO but which astropy.io.fits does not think it needs
# to resize to a new dtype results in an AttributeError.
filename = tmpdir.join('floatimg_with_bzero.fits').strpath
hdu = fits.ImageHDU(np.zeros((10, 10)))
hdu.header['BZERO'] = 10
hdu.writeto(filename, overwrite=True)
with fits.open(filename) as hdul:
hdul.info()
def test_image_write_readonly(tmpdir):
# Regression test to make sure that we can write out read-only arrays (#5512)
x = np.array([1, 2, 3])
x.setflags(write=False)
ghdu = fits.ImageHDU(data=x)
ghdu.add_datasum()
filename = tmpdir.join('test.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1, 2, 3])
# Same for compressed HDU
x = np.array([1.0, 2.0, 3.0])
x.setflags(write=False)
ghdu = fits.CompImageHDU(data=x)
# add_datasum does not work for CompImageHDU
# ghdu.add_datasum()
filename = tmpdir.join('test2.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1.0, 2.0, 3.0])
def test_int8(tmp_path):
'''Test for int8 support, https://github.com/astropy/astropy/issues/11995'''
img = np.arange(-50, 50, dtype=np.int8).reshape(10, 10)
hdu = fits.PrimaryHDU(img)
hdu.writeto(tmp_path / "int8.fits")
with fits.open(tmp_path / "int8.fits") as hdul:
assert hdul[0].header['BITPIX'] == 8
assert hdul[0].header['BZERO'] == -128
assert hdul[0].header['BSCALE'] == 1.0
assert_equal(hdul[0].data, img)
assert hdul[0].data.dtype == img.dtype
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import functools
from contextlib import nullcontext
from io import BytesIO
import re
from textwrap import dedent
import pytest
import numpy as np
from numpy import ma
from astropy.table import Table, MaskedColumn
from astropy.io import ascii
from astropy.io.ascii.core import ParameterError, FastOptionsError, InconsistentTableError
from astropy.io.ascii.fastbasic import (
FastBasic, FastCsv, FastTab, FastCommentedHeader, FastRdb, FastNoHeader)
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyWarning
from .common import assert_equal, assert_almost_equal, assert_true
StringIO = lambda x: BytesIO(x.encode('ascii')) # noqa
CI = os.environ.get('CI', False)
def assert_table_equal(t1, t2, check_meta=False, rtol=1.e-15, atol=1.e-300):
"""
Test equality of all columns in a table, with stricter tolerances for
float columns than the np.allclose default.
"""
assert_equal(len(t1), len(t2))
assert_equal(t1.colnames, t2.colnames)
if check_meta:
assert_equal(t1.meta, t2.meta)
for name in t1.colnames:
if len(t1) != 0:
assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)
if not isinstance(t1[name], MaskedColumn):
for i, el in enumerate(t1[name]):
try:
if not isinstance(el, str) and np.isnan(el):
assert_true(not isinstance(t2[name][i], str) and np.isnan(t2[name][i]))
elif isinstance(el, str):
assert_equal(el, t2[name][i])
else:
assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol)
except (TypeError, NotImplementedError):
pass # ignore for now
# Use this counter to create a unique filename for each file created in a test
# if this function is called more than once in a single test
_filename_counter = 0
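# Read ``table`` through several equivalent code paths (a Reader instance on a
# string, a byte stream and a list of lines; ascii.read with the fast and
# pure-Python readers; optionally the parallel fast reader; and finally from a
# temporary file) and check that all of the results agree.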
def _read(tmpdir, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs):
# make sure we have a newline so table can't be misinterpreted as a filename
global _filename_counter
table += '\n'
reader = Reader(**kwargs)
t1 = reader.read(table)
t2 = reader.read(StringIO(table))
t3 = reader.read(table.splitlines())
t4 = ascii.read(table, format=format, guess=False, **kwargs)
t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs)
assert_table_equal(t1, t2, check_meta=check_meta)
assert_table_equal(t2, t3, check_meta=check_meta)
assert_table_equal(t3, t4, check_meta=check_meta)
assert_table_equal(t4, t5, check_meta=check_meta)
if parallel:
if CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
t6 = ascii.read(table, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t6, check_meta=check_meta)
filename = str(tmpdir.join(f'table{_filename_counter}.txt'))
_filename_counter += 1
with open(filename, 'wb') as f:
f.write(table.encode('ascii'))
f.flush()
t7 = ascii.read(filename, format=format, guess=False, **kwargs)
if parallel:
t8 = ascii.read(filename, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t7, check_meta=check_meta)
if parallel:
assert_table_equal(t1, t8, check_meta=check_meta)
return t1
@pytest.fixture(scope='function')
def read_basic(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastBasic, format='basic')
@pytest.fixture(scope='function')
def read_csv(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCsv, format='csv')
@pytest.fixture(scope='function')
def read_tab(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastTab, format='tab')
@pytest.fixture(scope='function')
def read_commented_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCommentedHeader,
format='commented_header')
@pytest.fixture(scope='function')
def read_rdb(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastRdb, format='rdb')
@pytest.fixture(scope='function')
def read_no_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastNoHeader,
format='no_header')
@pytest.mark.parametrize('delimiter', [',', '\t', ' ', 'csv'])
@pytest.mark.parametrize('quotechar', ['"', "'"])
@pytest.mark.parametrize('fast', [False, True])
def test_embedded_newlines(delimiter, quotechar, fast):
"""Test that embedded newlines are supported for io.ascii readers
and writers, both fast and Python readers."""
# Start with an assortment of values with different embedded newlines and whitespace
dat = [['\t a ', ' b \n cd ', '\n'],
[' 1\n ', '2 \n" \t 3\n4\n5', "1\n '2\n"],
[' x,y \nz\t', '\t 12\n\t34\t ', '56\t\n'],
]
dat = Table(dat, names=('a', 'b', 'c'))
# Construct a table which is our expected result of writing the table and
# reading it back. Certain stripping of whitespace is expected.
exp = {} # expected output from reading
for col in dat.itercols():
vals = []
for val in col:
# Readers and writers both strip whitespace from ends of values
val = val.strip(' \t')
if not fast:
# Pure Python reader has a "feature" where it strips trailing
# whitespace from each input line. This means a value like
# " x \ny \t\n" gets read as "x\ny".
bits = val.splitlines(keepends=True)
bits_out = []
for bit in bits:
bit = re.sub(r'[ \t]+(\n?)$', r'\1', bit.strip(' \t'))
bits_out.append(bit)
val = ''.join(bits_out)
vals.append(val)
exp[col.info.name] = vals
exp = Table(exp)
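    # The special 'csv' parametrization exercises the dedicated csv format
    # (with its own quoting rules); the other delimiters go through 'basic'.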
if delimiter == 'csv':
format = 'csv'
delimiter = ','
else:
format = 'basic'
# Write the table to `text`
fh = io.StringIO()
ascii.write(dat, fh, format=format, delimiter=delimiter,
quotechar=quotechar, fast_writer=fast)
text = fh.getvalue()
# Read it back and compare to the expected
dat_out = ascii.read(text, format=format, guess=False, delimiter=delimiter,
quotechar=quotechar, fast_reader=fast)
eq = dat_out.values_equal(exp)
assert all(np.all(col) for col in eq.itercols())
@pytest.mark.parametrize("parallel", [True, False])
def test_simple_data(parallel, read_basic):
"""
Make sure the fast reader works with basic input data.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
def test_read_types():
"""
Make sure that the read() function takes filenames,
strings, and lists of strings in addition to file-like objects.
"""
t1 = ascii.read("a b c\n1 2 3\n4 5 6", format='fast_basic', guess=False)
# TODO: also read from file
t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format='fast_basic', guess=False)
t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format='fast_basic', guess=False)
assert_table_equal(t1, t2)
assert_table_equal(t2, t3)
@pytest.mark.parametrize("parallel", [True, False])
def test_supplied_names(parallel, read_basic):
"""
If passed as a parameter, names should replace any
column names found in the header.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header(parallel, read_basic, read_no_header):
"""
The header should not be read when header_start=None. Unless names is
passed, the column names should be auto-generated.
"""
# Cannot set header_start=None for basic format
with pytest.raises(ValueError):
read_basic("A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel)
t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']],
names=('col1', 'col2', 'col3'))
assert_table_equal(t2, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header_supplied_names(parallel, read_basic, read_no_header):
"""
If header_start=None and names is passed as a parameter, header
data should not be read and names should be used instead.
"""
table = read_no_header("A B C\n1 2 3\n4 5 6",
names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_comment(parallel, read_basic):
"""
Make sure that line comments are ignored by the C reader.
"""
table = read_basic("# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_lines(parallel, read_basic):
"""
Make sure that empty lines are ignored by the C reader.
"""
table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_lstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the beginning of fields.
"""
text = """
1, 2, \t3
A,\t\t B, C
a, b, c
""" + ' \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_rstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the end of fields.
"""
text = ' 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_conversion(parallel, read_basic):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = read_basic(text, parallel=parallel)
assert_equal(table['A'].dtype.kind, 'f')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'i')
assert_equal(table['D'].dtype.kind, 'f')
assert table['E'].dtype.kind in ('S', 'U')
assert table['F'].dtype.kind in ('S', 'U')
assert table['G'].dtype.kind in ('S', 'U')
assert table['H'].dtype.kind in ('S', 'U')
@pytest.mark.parametrize("parallel", [True, False])
def test_delimiter(parallel, read_basic):
"""
Make sure that different delimiters work as expected.
"""
text = dedent("""
COL1 COL2 COL3
1 A -1
2 B -2
""")
expected = Table([[1, 2], ['A', 'B'], [-1, -2]], names=('COL1', 'COL2', 'COL3'))
for sep in ' ,\t#;':
table = read_basic(text.replace(' ', sep), delimiter=sep, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_names(parallel, read_basic):
"""
If include_names is not None, the parser should read only those columns in include_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", include_names=['A', 'D'], parallel=parallel)
expected = Table([[1, 5], [4, 8]], names=('A', 'D'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_exclude_names(parallel, read_basic):
"""
If exclude_names is not None, the parser should exclude the columns in exclude_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", exclude_names=['A', 'D'], parallel=parallel)
expected = Table([[2, 6], [3, 7]], names=('B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_exclude_names(parallel, read_basic):
"""
Make sure that include_names is applied before exclude_names if both are specified.
"""
text = dedent("""
A B C D E F G H
1 2 3 4 5 6 7 8
9 10 11 12 13 14 15 16
""")
table = read_basic(text, include_names=['A', 'B', 'D', 'F', 'H'],
exclude_names=['B', 'F'], parallel=parallel)
expected = Table([[1, 9], [4, 12], [8, 16]], names=('A', 'D', 'H'))
assert_table_equal(table, expected)
def test_doubled_quotes(read_csv):
"""
Test #8283 (fix for #8281), parsing doubled-quotes "ab""cd" in a quoted
field was incorrect.
"""
tbl = '\n'.join(['a,b',
'"d""","d""q"',
'"""q",""""'])
expected = Table([['d"', '"q'],
['d"q', '"']],
names=('a', 'b'))
dat = read_csv(tbl)
assert_table_equal(dat, expected)
# In addition to the local read_csv wrapper, check that default
# parsing with guessing gives the right answer.
for fast_reader in True, False:
dat = ascii.read(tbl, fast_reader=fast_reader)
assert_table_equal(dat, expected)
@pytest.mark.filterwarnings("ignore:OverflowError converting to IntType in column TIMESTAMP")
def test_doubled_quotes_segv():
"""
Test the exact example from #8281 which resulted in SEGV prior to #8283
(in contrast to the tests above that just gave the wrong answer).
Attempts to produce a more minimal example were unsuccessful, so the whole
thing is included.
"""
tbl = dedent("""
"ID","TIMESTAMP","addendum_id","bib_reference","bib_reference_url","client_application","client_category","client_sort_key","color","coordsys","creator","creator_did","data_pixel_bitpix","dataproduct_subtype","dataproduct_type","em_max","em_min","format","hips_builder","hips_copyright","hips_creation_date","hips_creation_date_1","hips_creator","hips_data_range","hips_estsize","hips_frame","hips_glu_tag","hips_hierarchy","hips_initial_dec","hips_initial_fov","hips_initial_ra","hips_lon_asc","hips_master_url","hips_order","hips_order_1","hips_order_4","hips_order_min","hips_overlay","hips_pixel_bitpix","hips_pixel_cut","hips_pixel_scale","hips_progenitor_url","hips_publisher","hips_release_date","hips_release_date_1","hips_rgb_blue","hips_rgb_green","hips_rgb_red","hips_sampling","hips_service_url","hips_service_url_1","hips_service_url_2","hips_service_url_3","hips_service_url_4","hips_service_url_5","hips_service_url_6","hips_service_url_7","hips_service_url_8","hips_skyval","hips_skyval_method","hips_skyval_value","hips_status","hips_status_1","hips_status_2","hips_status_3","hips_status_4","hips_status_5","hips_status_6","hips_status_7","hips_status_8","hips_tile_format","hips_tile_format_1","hips_tile_format_4","hips_tile_width","hips_version","hipsgen_date","hipsgen_date_1","hipsgen_date_10","hipsgen_date_11","hipsgen_date_12","hipsgen_date_2","hipsgen_date_3","hipsgen_date_4","hipsgen_date_5","hipsgen_date_6","hipsgen_date_7","hipsgen_date_8","hipsgen_date_9","hipsgen_params","hipsgen_params_1","hipsgen_params_10","hipsgen_params_11","hipsgen_params_12","hipsgen_params_2","hipsgen_params_3","hipsgen_params_4","hipsgen_params_5","hipsgen_params_6","hipsgen_params_7","hipsgen_params_8","hipsgen_params_9","label","maxOrder","moc_access_url","moc_order","moc_release_date","moc_sky_fraction","obs_ack","obs_collection","obs_copyrigh_url","obs_copyright","obs_copyright_1","obs_copyright_url","obs_copyright_url_1","obs_description","obs_description_url","obs_descrition_url","obs_id","obs_initial_dec","obs_initial_fov","obs_initial_ra","obs_provenance","obs_regime","obs_title","ohips_frame","pixelCut","pixelRange","prov_did","prov_progenitor","prov_progenitor_url","publisher_did","publisher_id","s_pixel_scale","t_max","t_min"
"CDS/P/2MASS/H","1524123841000","","2006AJ....131.1163S","http://cdsbib.u-strasbg.fr/cgi-bin/cdsbib?2006AJ....131.1163S","AladinDesktop","Image/Infrared/2MASS","04-001-03","","","","ivo://CDS/P/2MASS/H","","","image","1.798E-6","1.525E-6","","Aladin/HipsGen v9.017","CNRS/Unistra","2013-05-06T20:36Z","","CDS (A.Oberto)","","","equatorial","","mean","","","","","","9","","","","","","0 60","2.236E-4","","","2016-04-22T13:48Z","","","","","","http://alasky.u-strasbg.fr/2MASS/H","https://irsa.ipac.caltech.edu/data/hips/CDS/2MASS/H","http://alaskybis.u-strasbg.fr/2MASS/H","https://alaskybis.u-strasbg.fr/2MASS/H","","","","","","","","","public master clonableOnce","public mirror unclonable","public mirror clonableOnce","public mirror clonableOnce","","","","","","jpeg fits","","","512","1.31","","","","","","","","","","","","","","","","","","","","","","","","","","","","","http://alasky.u-strasbg.fr/2MASS/H/Moc.fits","9","","1","University of Massachusetts & IPAC/Caltech","The Two Micron All Sky Survey - H band (2MASS H)","","University of Massachusetts & IPAC/Caltech","","http://www.ipac.caltech.edu/2mass/","","2MASS has uniformly scanned the entire sky in three near-infrared bands to detect and characterize point sources brighter than about 1 mJy in each band, with signal-to-noise ratio (SNR) greater than 10, using a pixel size of 2.0"". This has achieved an 80,000-fold improvement in sensitivity relative to earlier surveys. 2MASS used two highly-automated 1.3-m telescopes, one at Mt. Hopkins, AZ, and one at CTIO, Chile. Each telescope was equipped with a three-channel camera, each channel consisting of a 256x256 array of HgCdTe detectors, capable of observing the sky simultaneously at J (1.25 microns), H (1.65 microns), and Ks (2.17 microns). The University of Massachusetts (UMass) was responsible for the overall management of the project, and for developing the infrared cameras and on-site computing systems at both facilities. The Infrared Processing and Analysis Center (IPAC) is responsible for all data processing through the Production Pipeline, and construction and distribution of the data products. Funding is provided primarily by NASA and the NSF","","","","+0","0.11451621372724685","0","","Infrared","2MASS H (1.66um)","","","","","IPAC/NASA","","","","","51941","50600"
""") # noqa
ascii.read(tbl, format='csv', fast_reader=True, guess=False)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_fields(parallel, read_basic):
"""
The character quotechar (default '"') should denote the start of a field which can
contain the field delimiter and newlines.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = dedent("""
"A B" C D
1.5 2.1 -37.1
a b " c
d"
""")
table = read_basic(text, parallel=parallel)
expected = Table([['1.5', 'a'], ['2.1', 'b'], ['-37.1', 'c\nd']], names=('A B', 'C', 'D'))
assert_table_equal(table, expected)
table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("key,val", [
('delimiter', ',,'), # multi-char delimiter
('comment', '##'), # multi-char comment
('data_start', None), # data_start=None
('data_start', -1), # data_start negative
('quotechar', '##'), # multi-char quote signifier
('header_start', -1), # negative header_start
('converters', dict((i + 1, ascii.convert_numpy(np.uint))
for i in range(3))), # passing converters
('Inputter', ascii.ContinuationLinesInputter), # passing Inputter
('header_Splitter', ascii.DefaultSplitter), # passing Splitter
('data_Splitter', ascii.DefaultSplitter)])
def test_invalid_parameters(key, val):
"""
Make sure the C reader raises an error if passed parameters it can't handle.
"""
with pytest.raises(ParameterError):
FastBasic(**{key: val}).read('1 2 3\n4 5 6')
with pytest.raises(ParameterError):
ascii.read('1 2 3\n4 5 6',
format='fast_basic', guess=False, **{key: val})
def test_invalid_parameters_other():
with pytest.raises(TypeError):
FastBasic(foo=7).read('1 2 3\n4 5 6') # unexpected argument
with pytest.raises(FastOptionsError): # don't fall back on the slow reader
ascii.read('1 2 3\n4 5 6', format='basic', fast_reader={'foo': 7})
with pytest.raises(ParameterError):
# Outputter cannot be specified in constructor
FastBasic(Outputter=ascii.TableOutputter).read('1 2 3\n4 5 6')
def test_too_many_cols1():
"""
If a row contains too many columns, the C reader should raise an error.
"""
text = dedent("""
A B C
1 2 3
4 5 6
7 8 9 10
11 12 13
""")
with pytest.raises(InconsistentTableError) as e:
FastBasic().read(text)
assert 'Number of header columns (3) ' \
'inconsistent with data columns in data line 2' in str(e.value)
def test_too_many_cols2():
text = """\
aaa,bbb
1,2,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert 'Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e.value)
def test_too_many_cols3():
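    # Like test_too_many_cols2, but the first data row ends with two trailing
    # delimiters, so it parses as four fields against a two-column header.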
text = """\
aaa,bbb
1,2,,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert 'Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e.value)
def test_too_many_cols4():
# https://github.com/astropy/astropy/issues/9922
with pytest.raises(InconsistentTableError) as e:
ascii.read(get_pkg_data_filename('data/conf_py.txt'),
fast_reader=True, guess=True)
assert 'Unable to guess table format with the guesses listed below' in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_not_enough_cols(parallel, read_csv):
"""
If a row does not have enough columns, the FastCsv reader should add empty
fields while the FastBasic reader should raise an error.
"""
text = """
A,B,C
1,2,3
4,5
6,7,8
"""
table = read_csv(text, parallel=parallel)
assert table['B'][1] is not ma.masked
assert table['C'][1] is ma.masked
with pytest.raises(InconsistentTableError):
table = FastBasic(delimiter=',').read(text)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_end(parallel, read_basic, read_rdb):
"""
The parameter data_end should specify where data reading ends.
"""
text = """
A B C
1 2 3
4 5 6
7 8 9
10 11 12
"""
table = read_basic(text, data_end=3, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# data_end supports negative indexing
table = read_basic(text, data_end=-2, parallel=parallel)
assert_table_equal(table, expected)
text = """
A\tB\tC
N\tN\tS
1\t2\ta
3\t4\tb
5\t6\tc
"""
# make sure data_end works with RDB
table = read_rdb(text, data_end=-1, parallel=parallel)
expected = Table([[1, 3], [2, 4], ['a', 'b']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# positive index
table = read_rdb(text, data_end=3, parallel=parallel)
expected = Table([[1], [2], ['a']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# empty table if data_end is too small
table = read_rdb(text, data_end=1, parallel=parallel)
expected = Table([[], [], []], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_inf_nan(parallel, read_basic):
"""
Test that inf and nan-like values are correctly parsed on all platforms.
Regression test for https://github.com/astropy/astropy/pull/3525
"""
text = dedent("""\
A
nan
+nan
-nan
inf
infinity
+inf
+infinity
-inf
-infinity
""")
expected = Table({'A': [np.nan, np.nan, np.nan,
np.inf, np.inf, np.inf, np.inf,
-np.inf, -np.inf]})
table = read_basic(text, parallel=parallel)
assert table['A'].dtype.kind == 'f'
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_values(parallel, read_basic):
"""
Make sure that the parameter fill_values works as intended. If fill_values
is not specified, the default behavior should be to convert '' to 0.
"""
text = """
A, B, C
, 2, nan
a, -999, -3.4
nan, 5, -9999
8, nan, 7.6e12
"""
table = read_basic(text, delimiter=',', parallel=parallel)
    # The empty value in column A should become a masked '0'
assert isinstance(table['A'], MaskedColumn)
assert table['A'][0] is ma.masked
# '0' rather than 0 because there is a string in the column
assert_equal(table['A'].data.data[0], '0')
assert table['A'][1] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=('-999', '0'), parallel=parallel)
assert isinstance(table['B'], MaskedColumn)
assert table['A'][0] is not ma.masked # empty value unaffected
assert table['C'][2] is not ma.masked # -9999 is not an exact match
assert table['B'][1] is ma.masked
# Numeric because the rest of the column contains numeric data
assert_equal(table['B'].data.data[1], 0.0)
assert table['B'][0] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=[], parallel=parallel)
# None of the columns should be masked
for name in 'ABC':
assert not isinstance(table[name], MaskedColumn)
table = read_basic(text, delimiter=',',
fill_values=[('', '0', 'A'),
('nan', '999', 'A', 'C')], parallel=parallel)
assert np.isnan(table['B'][3]) # nan filling skips column B
assert table['B'][3] is not ma.masked # should skip masking as well as replacing nan
assert table['A'][0] is ma.masked
assert table['A'][2] is ma.masked
assert_equal(table['A'].data.data[0], '0')
assert_equal(table['A'].data.data[2], '999')
assert table['C'][0] is ma.masked
assert_almost_equal(table['C'].data.data[0], 999.0)
assert_almost_equal(table['C'][1], -3.4) # column is still of type float
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_include_exclude_names(parallel, read_csv):
"""
fill_include_names and fill_exclude_names should filter missing/empty value handling
in the same way that include_names and exclude_names filter output columns.
"""
text = """
A, B, C
, 1, 2
3, , 4
5, 5,
"""
table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is ma.masked
assert table['C'][2] is not ma.masked # C not in fill_include_names
table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel)
assert table['C'][2] is ma.masked
assert table['A'][0] is not ma.masked
assert table['B'][1] is not ma.masked # A and B excluded from fill handling
table = read_csv(text, fill_include_names=['A', 'B'],
fill_exclude_names=['B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is not ma.masked # fill_exclude_names applies after fill_include_names
assert table['C'][2] is not ma.masked
@pytest.mark.parametrize("parallel", [True, False])
def test_many_rows(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of rows
is large (so that each column string is longer than INITIAL_COL_SIZE).
"""
text = 'A B C\n'
for i in range(500): # create 500 rows
text += ' '.join([str(i) for i in range(3)])
text += '\n'
table = read_basic(text, parallel=parallel)
expected = Table([[0] * 500, [1] * 500, [2] * 500], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_many_columns(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of columns
is large (so that each header string is longer than INITIAL_HEADER_SIZE).
"""
# create a string with 500 columns and two data rows
text = ' '.join([str(i) for i in range(500)])
text += ('\n' + text + '\n' + text)
table = read_basic(text, parallel=parallel)
expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])
assert_table_equal(table, expected)
def test_fast_reader():
"""
Make sure that ascii.read() works as expected by default and with
fast_reader specified.
"""
text = 'a b c\n1 2 3\n4 5 6'
with pytest.raises(ParameterError): # C reader can't handle regex comment
ascii.read(text, format='fast_basic', guess=False, comment='##')
# Enable multiprocessing and the fast converter
try:
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': True, 'use_fast_converter': True})
except NotImplementedError:
# Might get this on Windows, try without parallel...
if os.name == 'nt':
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': False,
'use_fast_converter': True})
else:
raise
# Should raise an error if fast_reader has an invalid key
with pytest.raises(FastOptionsError):
ascii.read(text, format='fast_basic', guess=False, fast_reader={'foo': True})
# Use the slow reader instead
ascii.read(text, format='basic', guess=False, comment='##', fast_reader=False)
# Will try the slow reader afterwards by default
ascii.read(text, format='basic', guess=False, comment='##')
@pytest.mark.parametrize("parallel", [True, False])
def test_read_tab(parallel, read_tab):
"""
The fast reader for tab-separated values should not strip whitespace, unlike
the basic reader.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
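# First data row: leading/trailing field whitespace and an empty last field;
# second row: a quoted field with an embedded newline and trailing end-of-line whitespace.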
text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t '
table = read_tab(text, parallel=parallel)
assert_equal(table['1'][0], ' a') # preserve line whitespace
assert_equal(table['2'][0], ' b ') # preserve field whitespace
assert table['3'][0] is ma.masked # empty value should be masked
assert_equal(table['2'][1], ' d\n e') # preserve whitespace in quoted fields
assert_equal(table['3'][1], ' ') # preserve end-of-line whitespace
@pytest.mark.parametrize("parallel", [True, False])
def test_default_data_start(parallel, read_basic):
"""
If data_start is not explicitly passed to read(), data processing should
begin right after the header.
"""
text = 'ignore this line\na b c\n1 2 3\n4 5 6'
table = read_basic(text, header_start=1, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_commented_header(parallel, read_commented_header):
"""
The FastCommentedHeader reader should mimic the behavior of the
CommentedHeader by overriding the default header behavior of FastBasic.
"""
text = """
# A B C
1 2 3
4 5 6
"""
t1 = read_commented_header(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(t1, expected)
text = '# first commented line\n # second commented line\n\n' + text
t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel)
assert_table_equal(t2, expected)
t3 = read_commented_header(text, header_start=-1, data_start=0,
parallel=parallel) # negative indexing allowed
assert_table_equal(t3, expected)
text += '7 8 9'
t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel)
expected = Table([[7], [8], [9]], names=('A', 'B', 'C'))
assert_table_equal(t4, expected)
with pytest.raises(ParameterError):
read_commented_header(text, header_start=-1, data_start=-1,
parallel=parallel) # data_start cannot be negative
@pytest.mark.parametrize("parallel", [True, False])
def test_rdb(parallel, read_rdb):
"""
Make sure the FastRdb reader works as expected.
"""
text = """
A\tB\tC
1n\tS\t4N
1\t 9\t4.3
"""
table = read_rdb(text, parallel=parallel)
expected = Table([[1], [' 9'], [4.3]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
assert_equal(table['A'].dtype.kind, 'i')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'f')
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tS\tN\n4\tb\ta' # C column contains non-numeric data
read_rdb(text, parallel=parallel)
assert 'Column C failed to convert' in str(e.value)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\n1\t2\t3' # not enough types specified
read_rdb(text, parallel=parallel)
assert 'mismatch between number of column names and column types' in str(e.value)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\t5\n1\t2\t3' # invalid type for column C
read_rdb(text, parallel=parallel)
assert 'type definitions do not all match [num](N|S)' in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_start(parallel, read_basic):
"""
Make sure that data parsing begins at data_start (ignoring empty and
commented lines but not taking quoted values into account).
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = """
A B C
1 2 3
4 5 6
7 8 "9
1"
# comment
10 11 12
"""
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], ["6", "9\n1", "12"]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
table = read_basic(text, data_start=3, parallel=parallel)
# ignore empty line
expected = Table([[7, 10], [8, 11], ["9\n1", "12"]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
with pytest.raises(InconsistentTableError) as e:
# tries to begin in the middle of quoted field
read_basic(text, data_start=4, parallel=parallel)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e.value)
table = read_basic(text, data_start=5, parallel=parallel)
# ignore commented line
expected = Table([[10], [11], [12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
text = """
A B C
1 2 3
4 5 6
7 8 9
# comment
10 11 12
"""
# make sure reading works as expected in parallel
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_empty_values(parallel, read_basic):
"""
Quoted empty values spanning multiple lines should be treated correctly.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = 'a b c\n1 2 " \n "'
table = read_basic(text, parallel=parallel)
assert table['c'][0] == '\n' # surrounding spaces in the quoted field are stripped, but the embedded newline is kept
@pytest.mark.parametrize("parallel", [True, False])
def test_csv_comment_default(parallel, read_csv):
"""
Unless the comment parameter is specified, the CSV reader should
not treat any lines as comments.
"""
text = 'a,b,c\n#1,2,3\n4,5,6'
table = read_csv(text, parallel=parallel)
expected = Table([['#1', '4'], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_whitespace_before_comment(parallel, read_tab):
"""
Readers that don't strip whitespace from data (Tab, RDB)
should still treat lines with leading whitespace and then
the comment char as comment lines.
"""
text = 'a\tb\tc\n # comment line\n1\t2\t3'
table = read_tab(text, parallel=parallel)
expected = Table([[1], [2], [3]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_strip_line_trailing_whitespace(parallel, read_basic):
"""
Readers that strip whitespace from lines should ignore
trailing whitespace after the last data value of each
row.
"""
text = 'a b c\n1 2 \n3 4 5'
with pytest.raises(InconsistentTableError) as e:
ascii.read(StringIO(text), format='fast_basic', guess=False)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e.value)
text = 'a b c\n 1 2 3 \t \n 4 5 6 '
table = read_basic(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_data(parallel, read_basic):
"""
As long as column names are supplied, the C reader
should return an empty table in the absence of data.
"""
table = read_basic('a b c', parallel=parallel)
expected = Table([[], [], []], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
table = read_basic('a b c\n1 2 3', data_start=2, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_line_endings(parallel, read_basic, read_commented_header, read_rdb):
"""
Make sure the fast reader accepts CR and CR+LF
as newlines.
"""
text = 'a b c\n1 2 3\n4 5 6\n7 8 9\n'
expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'))
for newline in ('\r\n', '\r'):
table = read_basic(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
# Make sure the splitlines() method of FileString
# works with CR/CR+LF line endings
text = '#' + text
for newline in ('\r\n', '\r'):
table = read_commented_header(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
expected = Table([MaskedColumn([1, 4, 7]), [2, 5, 8], MaskedColumn([3, 6, 9])],
names=('a', 'b', 'c'))
expected['a'][0] = np.ma.masked
expected['c'][0] = np.ma.masked
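# The first RDB data row ('\t2\t') has empty 'a' and 'c' fields, which should read back as masked.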
text = 'a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n'
for newline in ('\r\n', '\r'):
table = read_rdb(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
assert np.all(table == expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_store_comments(parallel, read_basic):
"""
Make sure that the output Table produced by the fast
reader stores any comment lines in its meta attribute.
"""
text = """
# header comment
a b c
# comment 2
# comment 3
1 2 3
4 5 6
"""
table = read_basic(text, parallel=parallel, check_meta=True)
assert_equal(table.meta['comments'],
['header comment', 'comment 2', 'comment 3'])
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_quotes(parallel, read_basic):
"""
Make sure the C reader doesn't segfault when the
input data contains empty quotes. [#3407]
"""
table = read_basic('a b\n1 ""\n2 ""', parallel=parallel)
expected = Table([[1, 2], [0, 0]], names=('a', 'b'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fast_tab_with_names(parallel, read_tab):
"""
Make sure the C reader doesn't segfault when the header for the
first column is missing [#3545]
"""
content = """#
\tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot
-3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t""" # noqa
head = [f'A{i}' for i in range(28)]
read_tab(content, data_start=1, parallel=parallel, names=head)
@pytest.mark.hugemem
def test_read_big_table(tmpdir):
"""Test reading of a huge file.
This test generates a huge CSV file (~2.3Gb) before reading it (see
https://github.com/astropy/astropy/pull/5319). The test is run only if the
``--run-hugemem`` cli option is given. Note that running the test requires
quite a lot of memory (~18Gb when reading the file) !!
"""
NB_ROWS = 250000
NB_COLS = 500
filename = str(tmpdir.join("big_table.csv"))
print(f"Creating a {NB_ROWS} rows table ({NB_COLS} columns).")
data = np.random.random(NB_ROWS)
t = Table(data=[data] * NB_COLS, names=[str(i) for i in range(NB_COLS)])
data = None
print(f"Saving the table to {filename}")
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print(f"Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header).")
with open(filename, 'r') as f:
assert sum(1 for line in f) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
@pytest.mark.hugemem
def test_read_big_table2(tmpdir):
"""Test reading of a file with a huge column.
"""
# (2**32 // 2) : max value for a 32-bit signed int
# // 10 : we use a value for rows that have 10 chars (1e9)
# + 5 : add a few lines so the total length cannot be stored in a 32-bit int
NB_ROWS = 2**32 // 2 // 10 + 5
filename = str(tmpdir.join("big_table.csv"))
print(f"Creating a {NB_ROWS} rows table.")
data = np.full(NB_ROWS, int(1e9), dtype=np.int32)
t = Table(data=[data], names=['a'], copy=False)
print(f"Saving the table to {filename}")
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print(f"Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header).")
with open(filename, 'r') as f:
assert sum(1 for line in f) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
# Test these both with guessing turned on and off
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize('fast_reader', [False, dict(use_fast_converter=False),
dict(use_fast_converter=True)])
@pytest.mark.parametrize("parallel", [False, True])
def test_data_out_of_range(parallel, fast_reader, guess):
"""
Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|)
shall be returned as 0 and +-inf respectively by the C parser, just like
the Python parser.
Test fast converter only to nominal accuracy.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader['parallel'] = parallel
if fast_reader.get('use_fast_converter'):
rtol = 1.e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
test_for_warnings = fast_reader and not parallel
if not parallel and not fast_reader:
ctx = nullcontext()
else:
ctx = pytest.warns()
fields = ['10.1E+199', '3.14e+313', '2048e+306', '0.6E-325', '-2.e345']
values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf])
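# Only the first field stays finite; the others overflow to +/-inf or underflow to 0.0.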
# NOTE: Warning behavior varies for the parameters being passed in.
with ctx as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings: # Assert precision warnings for cols 2-5
assert len(w) == 4
for i in range(len(w)):
assert (f"OverflowError converting to FloatType in column col{i+2}"
in str(w[i].message))
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test some additional corner cases
fields = ['.0101E202', '0.000000314E+314', '1777E+305', '-1799E+305',
'0.2e-323', '5200e-327', ' 0.0000000000000000000001024E+330']
values = np.array([1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308])
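# '0.2e-323' underflows to 0.0 and '5200e-327' rounds to the smallest subnormal (~4.94e-324);
# '-1799E+305' overflows to -inf, while the long final field is still a finite 1.024e308.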
with ctx as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings: # Assert precision warnings for cols 4-6
assert len(w) == 3
for i in range(len(w)):
assert (f"OverflowError converting to FloatType in column col{i+4}"
in str(w[i].message))
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test corner cases again with non-standard exponent_style (auto-detection)
if fast_reader and fast_reader.get('use_fast_converter'):
fast_reader.update({'exponent_style': 'A'})
else:
pytest.skip("Fortran exponent style only available in fast converter")
fields = ['.0101D202', '0.000000314d+314', '1777+305', '-1799E+305',
'0.2e-323', '2500-327', ' 0.0000000000000000000001024Q+330']
with ctx as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings:
assert len(w) == 3
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize('fast_reader', [False, dict(use_fast_converter=False),
dict(use_fast_converter=True)])
@pytest.mark.parametrize("parallel", [False, True])
def test_data_at_range_limit(parallel, fast_reader, guess):
"""
Test parsing of fixed-format float64 numbers near range limits
(|~4.94e-324 to 1.7977e+308|). Within the limit for full precision
(|~2.5e-307| for the strtod C parser, about a factor of 10 better for the
fast_converter), exact numbers shall be returned; beyond that an Overflow
warning is raised.
Input of exactly 0.0 must not raise an OverflowError.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader['parallel'] = parallel
if fast_reader.get('use_fast_converter'):
rtol = 1.e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
# Test very long fixed-format strings (to strtod range limit w/o Overflow)
for D in 99, 202, 305:
t = ascii.read(StringIO(99 * '0' + '.' + D * '0' + '1'), format='no_header',
guess=guess, fast_reader=fast_reader)
assert_almost_equal(t['col1'][0], 10.**-(D + 1), rtol=rtol, atol=1.e-324)
for D in 99, 202, 308:
t = ascii.read(StringIO('1' + D * '0' + '.0'), format='no_header',
guess=guess, fast_reader=fast_reader)
assert_almost_equal(t['col1'][0], 10.**D, rtol=rtol, atol=1.e-324)
# 0.0 is always exact (no Overflow warning)!
for s in '0.0', '0.0e+0', 399 * '0' + '.' + 365 * '0':
t = ascii.read(StringIO(s), format='no_header',
guess=guess, fast_reader=fast_reader)
assert t['col1'][0] == 0.0
# Test OverflowError at precision limit with laxer rtol
if parallel:
pytest.skip("Catching warnings broken in parallel mode")
elif not fast_reader:
pytest.skip("Python/numpy reader does not raise on Overflow")
with pytest.warns() as warning_lines:
t = ascii.read(StringIO('0.' + 314 * '0' + '1'), format='no_header',
guess=guess, fast_reader=fast_reader)
n_warns = len(warning_lines)
assert n_warns in (0, 1), f'Expected 0 or 1 warning, found {n_warns}'
if n_warns == 1:
assert 'OverflowError converting to FloatType in column col1, possibly resulting in degraded precision' in str(warning_lines[0].message) # noqa
assert_almost_equal(t['col1'][0], 1.e-315, rtol=1.e-10, atol=1.e-324)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_int_out_of_range(parallel, guess):
"""
Integer numbers outside int range shall be returned as string columns
consistent with the standard (Python) parser (no 'upcasting' to float).
"""
imin = np.iinfo(int).min + 1
imax = np.iinfo(int).max - 1
huge = f'{imax+2:d}'
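# imax + 2 no longer fits in the platform integer type, so column S should come back as strings.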
text = f'P M S\n {imax:d} {imin:d} {huge:s}'
expected = Table([[imax], [imin], [huge]], names=('P', 'M', 'S'))
# NOTE: Warning behavior varies for the parameters being passed in.
with pytest.warns() as w:
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
if not parallel:
assert len(w) == 1
assert ("OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message))
assert_table_equal(table, expected)
# Check with leading zeroes to make sure strtol does not read them as octal
text = f'P M S\n000{imax:d} -0{-imin:d} 00{huge:s}'
expected = Table([[imax], [imin], ['00' + huge]], names=('P', 'M', 'S'))
with pytest.warns() as w:
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
if not parallel:
assert len(w) == 1
assert ("OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message))
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
def test_int_out_of_order(guess):
"""
Mixed columns should be returned as float, but if the out-of-range integer
shows up first, it will produce a string column - with both readers.
Broken with the parallel fast_reader.
"""
imax = np.iinfo(int).max - 1
text = f'A B\n 12.3 {imax:d}0\n {imax:d}0 45.6e7'
expected = Table([[12.3, 10. * imax], [f'{imax:d}0', '45.6e7']],
names=('A', 'B'))
with pytest.warns(AstropyWarning, match=r'OverflowError converting to '
r'IntType in column B, reverting to String'):
table = ascii.read(text, format='basic', guess=guess, fast_reader=True)
assert_table_equal(table, expected)
with pytest.warns(AstropyWarning, match=r'OverflowError converting to '
r'IntType in column B, reverting to String'):
table = ascii.read(text, format='basic', guess=guess, fast_reader=False)
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_reader(parallel, guess):
"""
Make sure that ascii.read() can read Fortran-style exponential notation
using the fast_reader.
"""
# Check for nominal np.float64 precision
rtol = 1.e-15
atol = 0.0
text = 'A B C D\n100.01{:s}99 2.0 2.0{:s}-103 3\n' + \
' 4.2{:s}-1 5.0{:s}-1 0.6{:s}4 .017{:s}+309'
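# Each {:s} placeholder is filled with an exponent character from expstyles below;
# the '0' entries of the 'Fortran' style yield bare triple-digit exponents such as '2.00-103' (= 2.0e-103).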
expc = Table([[1.0001e101, 0.42], [2, 0.5], [2.e-103, 6.e3], [3, 1.7e307]],
names=('A', 'B', 'C', 'D'))
expstyles = {'e': 6 * ('E'),
'D': ('D', 'd', 'd', 'D', 'd', 'D'),
'Q': 3 * ('q', 'Q'),
'Fortran': ('E', '0', 'D', 'Q', 'd', '0')}
# C strtod (the non-fast converter) can't handle Fortran exponents
with pytest.raises(FastOptionsError) as e:
ascii.read(text.format(*(6 * ('D'))), format='basic', guess=guess,
fast_reader={'use_fast_converter': False,
'parallel': parallel, 'exponent_style': 'D'})
assert 'fast_reader: exponent_style requires use_fast_converter' in str(e.value)
# Enable multiprocessing and the fast converter; iterate over
# all style-exponent combinations, including auto-detection
for s, c in expstyles.items():
table = ascii.read(text.format(*c), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': s})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
# Additional corner-case checks including triple-exponents without
# any character and mixed whitespace separators
text = 'A B\t\t C D\n1.0001+101 2.0+000\t 0.0002-099 3\n ' + \
'0.42-000 \t 0.5 6.+003 0.000000000000000000000017+330'
table = ascii.read(text, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_invalid_exp(parallel, guess):
"""
Test Fortran-style exponential notation in the fast_reader with invalid
exponent-like patterns (no triple-digits) to make sure they are returned
as strings instead, as with the standard C parser.
"""
if parallel and CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
formats = {'basic': ' ', 'tab': '\t', 'csv': ','}
header = ['S1', 'F2', 'S2', 'F3', 'S3', 'F4', 'F5', 'S4', 'I1', 'F6', 'F7']
# Tested entries and expected returns, first for auto-detect,
# then for different specified exponents
fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.',
'2', '4.56e-2.3', '8000', '4.2-022', '.00000145e314']
vals_e = ['1.0001+1', '.42d1', '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
vals_d = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', '.00000145e314']
vals_a = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, 4.2e-22, 1.45e308]
vals_v = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
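# vals_e: default expchar 'E'; vals_d: expchar 'D'; vals_a: auto-detection ('A'/'a');
# vals_v: what MSVC strtod may return, since it apparently also accepts 'd' exponents.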
# Iterate over supported format types and separators
for f, s in formats.items():
t1 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)),
format=f, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(t1, Table([[col] for col in vals_a], names=header))
# Non-basic separators require guessing enabled to be detected
if guess:
formats['bar'] = '|'
else:
formats = {'basic': ' '}
for s in formats.values():
t2 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'a'})
assert_table_equal(t2, Table([[col] for col in vals_a], names=header))
# Iterate for (default) expchar 'E'
for s in formats.values():
t3 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': True})
assert_table_equal(t3, Table([[col] for col in vals_e], names=header))
# Iterate for expchar 'D'
for s in formats.values():
t4 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'D'})
assert_table_equal(t4, Table([[col] for col in vals_d], names=header))
# Iterate for regular converter (strtod)
for s in formats.values():
t5 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': False})
read_values = [col[0] for col in t5.itercols()]
if os.name == 'nt':
# Apparently C strtod() on (some?) MSVC recognizes 'd' exponents!
assert read_values == vals_v or read_values == vals_e
else:
assert read_values == vals_e
def test_fortran_reader_notbasic():
"""
Check if readers without a fast option raise a value error when a
fast_reader is asked for (implies the default 'guess=True').
"""
tabstr = dedent("""
a b
1 1.23D4
2 5.67D-8
""")[1:-1]
t1 = ascii.read(tabstr.split('\n'), fast_reader=dict(exponent_style='D'))
assert t1['b'].dtype.kind == 'f'
tabrdb = dedent("""
a\tb
# A simple RDB table
N\tN
1\t 1.23D4
2\t 5.67-008
""")[1:-1]
t2 = ascii.read(tabrdb.split('\n'), format='rdb',
fast_reader=dict(exponent_style='fortran'))
assert t2['b'].dtype.kind == 'f'
tabrst = dedent("""
= =======
a b
= =======
1 1.23E4
2 5.67E-8
= =======
""")[1:-1]
t3 = ascii.read(tabrst.split('\n'), format='rst')
assert t3['b'].dtype.kind == 'f'
t4 = ascii.read(tabrst.split('\n'), guess=True)
assert t4['b'].dtype.kind == 'f'
# In the special case of fast_reader=True (the default),
# the incompatibility is ignored
t5 = ascii.read(tabrst.split('\n'), format='rst', fast_reader=True)
assert t5['b'].dtype.kind == 'f'
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader='force')
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(use_fast_converter=False))
tabrst = tabrst.replace('E', 'D')
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(exponent_style='D'))
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize('fast_reader', [dict(exponent_style='D'),
dict(exponent_style='A')])
def test_dict_kwarg_integrity(fast_reader, guess):
"""
Check if dictionaries passed as kwargs (fast_reader in this test) are
left intact by ascii.read()
"""
expstyle = fast_reader.get('exponent_style', 'E')
fields = ['10.1D+199', '3.14d+313', '2048d+306', '0.6D-325', '-2.d345']
ascii.read(StringIO(' '.join(fields)), guess=guess,
fast_reader=fast_reader)
assert fast_reader.get('exponent_style', None) == expstyle
@pytest.mark.parametrize('fast_reader', [False,
dict(parallel=True),
dict(parallel=False)])
def test_read_empty_basic_table_with_comments(fast_reader):
"""
Test for reading a "basic" format table that has no data but has comments.
Tests the fix for #8267.
"""
dat = """
# comment 1
# comment 2
col1 col2
"""
t = ascii.read(dat, fast_reader=fast_reader)
assert t.meta['comments'] == ['comment 1', 'comment 2']
assert len(t) == 0
assert t.colnames == ['col1', 'col2']
@pytest.mark.parametrize('fast_reader', [dict(use_fast_converter=True),
dict(exponent_style='A')])
def test_conversion_fast(fast_reader):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = ascii.read(text, fast_reader=fast_reader)
assert_equal(table['A'].dtype.kind, 'f')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'i')
assert_equal(table['D'].dtype.kind, 'f')
assert table['E'].dtype.kind in ('S', 'U')
assert table['F'].dtype.kind in ('S', 'U')
assert table['G'].dtype.kind in ('S', 'U')
assert table['H'].dtype.kind in ('S', 'U')
@pytest.mark.parametrize('delimiter', ['\n', '\r'])
@pytest.mark.parametrize('fast_reader', [False, True, 'force'])
def test_newline_as_delimiter(delimiter, fast_reader):
"""
Check that newline characters are correctly handled as delimiters.
Tests the fix for #9928.
"""
if delimiter == '\r':
eol = '\n'
else:
eol = '\r'
inp0 = ["a | b | c ", " 1 | '2' | 3.00000 "]
inp1 = "a {0:s} b {0:s}c{1:s} 1 {0:s}'2'{0:s} 3.0".format(delimiter, eol)
inp2 = [f"a {delimiter} b{delimiter} c",
f"1{delimiter} '2' {delimiter} 3.0"]
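# inp1 is a single string with `delimiter` between fields and `eol` between rows;
# inp2 carries the same content as a list of two lines.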
t0 = ascii.read(inp0, delimiter='|', fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
t2 = ascii.read(inp2, delimiter=delimiter, fast_reader=fast_reader)
assert t1.colnames == t2.colnames == ['a', 'b', 'c']
assert len(t1) == len(t2) == 1
assert t1['b'].dtype.kind in ('S', 'U')
assert t2['b'].dtype.kind in ('S', 'U')
assert_table_equal(t1, t0)
assert_table_equal(t2, t0)
inp0 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format('|', eol)
inp1 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format(delimiter, eol)
t0 = ascii.read(inp0, delimiter='|', fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
if not fast_reader:
pytest.xfail("Quoted fields are not parsed correctly by BaseSplitter")
assert_equal(t1['b'].dtype.kind, 'i')
@pytest.mark.parametrize('delimiter', [' ', '|', '\n', '\r'])
@pytest.mark.parametrize('fast_reader', [False, True, 'force'])
def test_single_line_string(delimiter, fast_reader):
"""
String input without a newline character is interpreted as a filename,
unless it is an element of an iterable. Maybe not logical, but test that it is
at least treated consistently.
"""
expected = Table([[1], [2], [3.00]], names=('col1', 'col2', 'col3'))
text = "1{0:s}2{0:s}3.0".format(delimiter)
if delimiter in ('\r', '\n'):
t1 = ascii.read(text, format='no_header', delimiter=delimiter, fast_reader=fast_reader)
assert_table_equal(t1, expected)
else:
# Windows raises OSError, but not the other OSes.
with pytest.raises((FileNotFoundError, OSError)):
t1 = ascii.read(text, format='no_header', delimiter=delimiter, fast_reader=fast_reader)
t2 = ascii.read([text], format='no_header', delimiter=delimiter, fast_reader=fast_reader)
assert_table_equal(t2, expected)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``ECSV``
reader/writer.
"""
from astropy.table.column import MaskedColumn
import os
import copy
import sys
from io import StringIO
from contextlib import nullcontext
import pytest
import numpy as np
import yaml
from astropy.table import Table, Column, QTable
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units import QuantityInfo
from astropy.utils.compat import NUMPY_LT_1_19_1
from astropy.io.ascii.ecsv import DELIMITERS, InvalidEcsvDatatypeWarning
from astropy.io import ascii
from astropy import units as u
from astropy.io.tests.mixin_columns import mixin_cols, compare_attrs, serialized_names
from .common import TEST_DIR
DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
'uint64', 'float16', 'float32', 'float64', 'float128',
'str']
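# float128 is unavailable on Windows, on 32-bit platforms, and in numpy builds without extended precision.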
if not hasattr(np, 'float128') or os.name == 'nt' or sys.maxsize <= 2**32:
DTYPES.remove('float128')
T_DTYPES = Table()
for dtype in DTYPES:
if dtype == 'bool':
data = np.array([False, True, False])
elif dtype == 'str':
data = np.array(['ab 0', 'ab, 1', 'ab2'])
else:
data = np.arange(3, dtype=dtype)
c = Column(data, unit='m / s', description='descr_' + dtype,
meta={'meta ' + dtype: 1})
T_DTYPES[dtype] = c
T_DTYPES.meta['comments'] = ['comment1', 'comment2']
# Corresponds to simple_table()
SIMPLE_LINES = ['# %ECSV 1.0',
'# ---',
'# datatype:',
'# - {name: a, datatype: int64}',
'# - {name: b, datatype: float64}',
'# - {name: c, datatype: string}',
'# schema: astropy-2.0',
'a b c',
'1 1.0 c',
'2 2.0 d',
'3 3.0 e']
def test_write_simple():
"""
Write a simple table with common types. This shows the compact version
of serialization with one line per column.
"""
t = simple_table()
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == SIMPLE_LINES
def test_write_full():
"""
Write a full-featured table with common types and explicitly check the output
"""
t = T_DTYPES['bool', 'int64', 'float64', 'str']
lines = ['# %ECSV 1.0',
'# ---',
'# datatype:',
'# - name: bool',
'# unit: m / s',
'# datatype: bool',
'# description: descr_bool',
'# meta: {meta bool: 1}',
'# - name: int64',
'# unit: m / s',
'# datatype: int64',
'# description: descr_int64',
'# meta: {meta int64: 1}',
'# - name: float64',
'# unit: m / s',
'# datatype: float64',
'# description: descr_float64',
'# meta: {meta float64: 1}',
'# - name: str',
'# unit: m / s',
'# datatype: string',
'# description: descr_str',
'# meta: {meta str: 1}',
'# meta: !!omap',
'# - comments: [comment1, comment2]',
'# schema: astropy-2.0',
'bool int64 float64 str',
'False 0 0.0 "ab 0"',
'True 1 1.0 "ab, 1"',
'False 2 2.0 ab2']
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == lines
def test_write_read_roundtrip():
"""
Write a full-featured table with all types and see that it round-trips on
readback. Use both space and comma delimiters.
"""
t = T_DTYPES
for delimiter in DELIMITERS:
out = StringIO()
t.write(out, format='ascii.ecsv', delimiter=delimiter)
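# Read the output back through several equivalent entry points; all should return identical tables.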
t2s = [Table.read(out.getvalue(), format='ascii.ecsv'),
Table.read(out.getvalue(), format='ascii'),
ascii.read(out.getvalue()),
ascii.read(out.getvalue(), format='ecsv', guess=False),
ascii.read(out.getvalue(), format='ecsv')]
for t2 in t2s:
assert t.meta == t2.meta
for name in t.colnames:
assert t[name].attrs_equal(t2[name])
assert np.all(t[name] == t2[name])
def test_bad_delimiter():
"""
Passing a delimiter other than space or comma gives an exception
"""
out = StringIO()
with pytest.raises(ValueError) as err:
T_DTYPES.write(out, format='ascii.ecsv', delimiter='|')
assert 'only space and comma are allowed' in str(err.value)
def test_bad_header_start():
"""
Bad header without initial # %ECSV x.x
"""
lines = copy.copy(SIMPLE_LINES)
lines[0] = '# %ECV 0.9'
with pytest.raises(ascii.InconsistentTableError):
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
def test_bad_delimiter_input():
"""
Illegal delimiter in input
"""
lines = copy.copy(SIMPLE_LINES)
lines.insert(2, '# delimiter: |')
with pytest.raises(ValueError) as err:
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
assert 'only space and comma are allowed' in str(err.value)
def test_multidim_input():
"""
Multi-dimensional column in input
"""
t = Table()
t['a'] = np.arange(24).reshape(2, 3, 4)
t['a'].info.description = 'description'
t['a'].info.meta = {1: 2}
t['b'] = [1, 2]
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert np.all(t2['a'] == t['a'])
assert t2['a'].shape == t['a'].shape
assert t2['a'].dtype == t['a'].dtype
assert t2['a'].info.description == t['a'].info.description
assert t2['a'].info.meta == t['a'].info.meta
assert np.all(t2['b'] == t['b'])
def test_structured_input():
"""
Structured column in input.
"""
t = Table()
# Add unit, description and meta to make sure that round-trips as well.
t['a'] = Column([('B', (1., [2., 3.])),
('A', (9., [8., 7.]))],
dtype=[('s', 'U1'), ('v', [('p0', 'f8'), ('p1', '2f8')])],
description='description',
format='>', # Most formats do not work with structured!
unit='m', # Overall unit should round-trip.
meta={1: 2})
t['b'] = Column([[(1., 2.), (9., 8.)],
[(3., 4.), (7., 6.)]],
dtype='f8,f8',
unit=u.Unit('m,s') # Per part unit should round-trip too.
)
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
for col in t.colnames:
assert np.all(t2[col] == t[col])
assert t2[col].shape == t[col].shape
assert t2[col].dtype == t[col].dtype
assert t2[col].unit == t[col].unit
assert t2[col].format == t[col].format
assert t2[col].info.description == t[col].info.description
assert t2[col].info.meta == t[col].info.meta
def test_round_trip_empty_table():
"""Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)"""
t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c'])
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.dtype == t2.dtype
assert len(t2) == 0
def test_csv_ecsv_colnames_mismatch():
"""
Test that mismatch in column names from normal CSV header vs.
ECSV YAML header raises the expected exception.
"""
lines = copy.copy(SIMPLE_LINES)
header_index = lines.index('a b c')
lines[header_index] = 'a b d'
with pytest.raises(ValueError) as err:
ascii.read(lines, format='ecsv')
assert "column names from ECSV header ['a', 'b', 'c']" in str(err.value)
def test_regression_5604():
"""
See https://github.com/astropy/astropy/issues/5604 for more.
"""
t = Table()
t.meta = {"foo": 5 * u.km, "foo2": u.s}
t["bar"] = [7] * u.km
out = StringIO()
t.write(out, format="ascii.ecsv")
assert '!astropy.units.Unit' in out.getvalue()
assert '!astropy.units.Quantity' in out.getvalue()
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
assert obj1.shape == obj2.shape
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description',
'info.dtype']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-10)
else:
assert np.all(a1 == a2)
# For no attrs that means we just compare directly.
if not attrs:
if isinstance(obj1, np.ndarray) and obj1.dtype.kind == 'f':
assert quantity_allclose(obj1, obj2, rtol=1e-15)
else:
assert np.all(obj1 == obj2)
def test_ecsv_mixins_ascii_read_class():
"""Ensure that ascii.read(ecsv_file) returns the correct class
(QTable if any Quantity subclasses, Table otherwise).
"""
# Make a table with every mixin type except Quantities
t = QTable({name: col for name, col in mixin_cols.items()
if not isinstance(col.info, QuantityInfo)})
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is Table
# Add a single quantity column
t['lon'] = mixin_cols['lon']
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is QTable
def test_ecsv_mixins_qtable_to_table():
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.allclose(col.value, col2, rtol=1e-10)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_mixins_as_one(table_cls):
"""Test write/read all cols at once and validate intermediate column names"""
names = sorted(mixin_cols)
all_serialized_names = []
# ECSV stores times as value by default, so we just get the column back.
# One exception is tm3, which is set to serialize via jd1 and jd2.
for name in names:
s_names = serialized_names[name]
if not name.startswith('tm3'):
s_names = [s_name.replace('.jd1', '') for s_name in s_names
if not s_name.endswith('jd2')]
all_serialized_names.extend(s_names)
t = table_cls([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
# Read as an ascii.basic table (skip all the ECSV junk)
t3 = table_cls.read(out.getvalue(), format='ascii.basic')
assert t3.colnames == all_serialized_names
def make_multidim(col, ndim):
"""Take a col with length=2 and make it N-d by repeating elements.
For the special case of ndim==1 just return the original.
The output has shape [3] * ndim. By using 3 we can be sure that repeating
the two input elements gives an output that is sufficiently unique for
the multidim tests.
"""
if ndim > 1:
import itertools
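# Cycle 0, 1, 0, 1, ... to build 3**ndim indices, then reshape to [3] * ndim.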
idxs = [idx for idx, _ in zip(itertools.cycle([0, 1]), range(3 ** ndim))]
col = col[idxs].reshape([3] * ndim)
return col
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
@pytest.mark.parametrize('ndim', (1, 2, 3))
def test_ecsv_mixins_per_column(table_cls, name_col, ndim):
"""Test write/read one col at a time and do detailed validation.
This tests every input column type as 1-d, 2-d and 3-d.
"""
name, col = name_col
c = make_multidim(np.array([1.0, 2.0]), ndim)
col = make_multidim(col, ndim)
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'description'
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for colname in t.colnames:
assert len(t2[colname].shape) == ndim
if colname in ('c1', 'c2'):
compare = ['data']
else:
# Storing Longitude as Column loses wrap_angle.
compare = [attr for attr in compare_attrs[colname]
if not (attr == 'wrap_angle' and table_cls is Table)]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
def test_round_trip_masked_table_default(tmpdir):
"""Test (mostly) round-trip of MaskedColumn through ECSV using default serialization
that uses an empty string "" to mark NULL values. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = str(tmpdir.join('test.ecsv'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t.write(filename)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
# From a formal perspective the round-trip columns are the "same"
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# But peeking under the mask shows that the underlying data are changed
# because by default ECSV uses "" to represent masked elements.
t[name].mask = False
t2[name].mask = False
assert not np.all(t2[name] == t[name]) # Expected diff
def test_round_trip_masked_table_serialize_mask(tmpdir):
"""Same as prev but set the serialize_method to 'data_mask' so mask is written out"""
filename = str(tmpdir.join('test.ecsv'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t['c'][0] = '' # This would come back as masked for default "" NULL marker
# MaskedColumn with no masked elements. See the MaskedColumnInfo class
# _represent_as_dict() method for info about how we test a column with no masked elements.
t['d'] = [1, 2, 3]
t.write(filename, serialize_method='data_mask')
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_round_trip_user_defined_unit(table_cls, tmpdir):
"""Ensure that user-defined units can be read back when they are enabled."""
# Test adapted from #8897, where it was noted that this works
# but was not tested.
filename = str(tmpdir.join('test.ecsv'))
unit = u.def_unit('bandpass_sol_lum')
t = table_cls()
t['l'] = np.arange(5) * unit
t.write(filename)
# without the unit enabled, get UnrecognizedUnit
if table_cls is QTable:
ctx = pytest.warns(u.UnitsWarning, match=r"'bandpass_sol_lum' did not parse .*")
else:
ctx = nullcontext()
# Note: The read might also generate ResourceWarning, in addition to UnitsWarning
with ctx:
t2 = table_cls.read(filename)
assert isinstance(t2['l'].unit, u.UnrecognizedUnit)
assert str(t2['l'].unit) == 'bandpass_sol_lum'
if table_cls is QTable:
assert np.all(t2['l'].value == t['l'].value)
else:
assert np.all(t2['l'] == t['l'])
# But with it enabled, it works.
with u.add_enabled_units(unit):
t3 = table_cls.read(filename)
assert t3['l'].unit is unit
assert np.all(t3['l'] == t['l'])
# Just to be sure, also try writing with the unit enabled.
filename2 = str(tmpdir.join('test2.ecsv'))
t3.write(filename2)
t4 = table_cls.read(filename)
assert t4['l'].unit is unit
assert np.all(t4['l'] == t['l'])
def test_read_masked_bool():
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: col0, datatype: bool}
# schema: astropy-2.0
col0
1
0
True
""
False
"""
dat = ascii.read(txt, format='ecsv')
col = dat['col0']
assert isinstance(col, MaskedColumn)
assert np.all(col.mask == [False, False, False, True, False])
assert np.all(col == [True, False, True, False, False])
@pytest.mark.parametrize('serialize_method', ['null_value', 'data_mask'])
@pytest.mark.parametrize('dtype', [np.int64, np.float64, bool, str])
@pytest.mark.parametrize('delimiter', [',', ' '])
def test_roundtrip_multidim_masked_array(serialize_method, dtype, delimiter):
# TODO also test empty string with null value
t = Table()
col = MaskedColumn(np.arange(12).reshape(2, 3, 2), dtype=dtype)
if dtype is str:
# np does something funny and gives a dtype of U21.
col = col.astype('U2')
col.mask[0, 0, 0] = True
col.mask[1, 1, 1] = True
t['a'] = col
t['b'] = ['x', 'y'] # Add another column for kicks
out = StringIO()
t.write(out, format='ascii.ecsv', serialize_method=serialize_method)
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
if hasattr(t[name], 'mask'):
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize('subtype', ['some-user-type', 'complex'])
def test_multidim_unknown_subtype(subtype):
"""Test an ECSV file with a string type but unknown subtype"""
txt = f"""\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: {subtype}
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.warns(InvalidEcsvDatatypeWarning,
match=rf"unexpected subtype '{subtype}' set for column 'a'"):
t = ascii.read(txt, format='ecsv')
assert t['a'].dtype.kind == 'U'
assert t['a'][0] == '[1,2]'
def test_multidim_bad_shape():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: int64[3]
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.raises(ValueError, match="column 'a' failed to convert: shape mismatch"):
Table.read(txt, format='ascii.ecsv')
def test_write_not_json_serializable():
t = Table()
t['a'] = np.array([set([1, 2]), 1], dtype=object)
match = "could not convert column 'a' to string: Object of type set is not JSON serializable"
out = StringIO()
with pytest.raises(TypeError, match=match):
t.write(out, format='ascii.ecsv')
def test_read_not_json_serializable():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: string, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: column value is not valid JSON"
with pytest.raises(ValueError, match=match):
Table.read(txt, format='ascii.ecsv')
def test_read_bad_datatype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: object}
# schema: astropy-2.0
a
fail
[3,4]"""
with pytest.warns(InvalidEcsvDatatypeWarning,
match="unexpected datatype 'object' of column 'a' is not in allowed"):
t = Table.read(txt, format='ascii.ecsv')
assert t['a'][0] == "fail"
assert type(t['a'][1]) is str
assert type(t['a'].dtype) == np.dtype("O")
@pytest.mark.skipif(NUMPY_LT_1_19_1,
reason="numpy cannot parse 'complex' as string until 1.19+")
def test_read_complex():
"""Test an ECSV v1.0 file with a complex column"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: complex}
# schema: astropy-2.0
a
1+1j
2+2j"""
with pytest.warns(InvalidEcsvDatatypeWarning,
match="unexpected datatype 'complex' of column 'a' is not in allowed"):
t = Table.read(txt, format='ascii.ecsv')
assert t['a'].dtype.type is np.complex128
def test_read_str():
"""Test an ECSV file with a 'str' instead of 'string' datatype."""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: str}
# schema: astropy-2.0
a
sometext
S""" # also testing single character text
with pytest.warns(InvalidEcsvDatatypeWarning,
match="unexpected datatype 'str' of column 'a' is not in allowed"):
t = Table.read(txt, format='ascii.ecsv')
assert isinstance(t['a'][1], str)
assert isinstance(t['a'][0], np.str_)
def test_read_bad_datatype_for_object_subtype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: int64, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: datatype of column 'a' must be \"string\""
with pytest.raises(ValueError, match=match):
Table.read(txt, format='ascii.ecsv')
def test_full_repr_roundtrip():
"""Test round-trip of float values to full precision even with format
specified"""
t = Table()
t['a'] = np.array([np.pi, 1/7], dtype=np.float64)
t['a'].info.format = '.2f'
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert np.all(t['a'] == t2['a'])
assert t2['a'].info.format == '.2f'
#############################################################################
# Define a number of specialized columns for testing and the expected values
# of `datatype` for each column.
#############################################################################
# First here is some helper code used to make the expected outputs code.
def _get_ecsv_header_dict(text):
lines = [line.strip() for line in text.splitlines()]
lines = [line[2:] for line in lines if line.startswith('#')]
lines = lines[2:] # Get rid of the header
out = yaml.safe_load('\n'.join(lines))
return out
def _make_expected_values(cols):
from pprint import pformat
for name, col in cols.items():
t = Table()
t[name] = col
out = StringIO()
t.write(out, format='ascii.ecsv')
hdr = _get_ecsv_header_dict(out.getvalue())
fmt_hdr = pformat(hdr['datatype'])
print(f'exps[{name!r}] =', fmt_hdr[:1])
print(fmt_hdr[1:])
print()
# Expected values of `datatype` for each column
exps = {}
cols = {}
# Run of the mill scalar for completeness
cols['scalar'] = np.array([1, 2], dtype=np.int16)
exps['scalar'] = [
{'datatype': 'int16', 'name': 'scalar'}]
# Array of lists that works as a 2-d variable array. This is just treated
# as an object.
cols['2-d variable array lists'] = c = np.empty(shape=(2,), dtype=object)
c[0] = [[1, 2], ["a", 4]]
c[1] = [[1, 2, 3], [4, 5.25, 6]]
exps['2-d variable array lists'] = [
{'datatype': 'string',
'name': '2-d variable array lists',
'subtype': 'json'}]
# Array of numpy arrays that is a 2-d variable array
cols['2-d variable array numpy'] = c = np.empty(shape=(2,), dtype=object)
c[0] = np.array([[1, 2], [3, 4]], dtype=np.float32)
c[1] = np.array([[1, 2, 3], [4, 5.5, 6]], dtype=np.float32)
exps['2-d variable array numpy'] = [
{'datatype': 'string',
'name': '2-d variable array numpy',
'subtype': 'float32[2,null]'}]
cols['1-d variable array lists'] = np.array([[1, 2], [3, 4, 5]], dtype=object)
exps['1-d variable array lists'] = [
{'datatype': 'string',
'name': '1-d variable array lists',
'subtype': 'json'}]
# Variable-length array
cols['1-d variable array numpy'] = np.array(
[np.array([1, 2], dtype=np.uint8),
np.array([3, 4, 5], dtype=np.uint8)], dtype=object)
exps['1-d variable array numpy'] = [
{'datatype': 'string',
'name': '1-d variable array numpy',
'subtype': 'uint8[null]'}]
cols['1-d variable array numpy str'] = np.array(
[np.array(['a', 'b']),
np.array(['c', 'd', 'e'])], dtype=object)
exps['1-d variable array numpy str'] = [
{'datatype': 'string',
'name': '1-d variable array numpy str',
'subtype': 'string[null]'}]
cols['1-d variable array numpy bool'] = np.array(
[np.array([True, False]),
np.array([True, False, True])], dtype=object)
exps['1-d variable array numpy bool'] = [
{'datatype': 'string',
'name': '1-d variable array numpy bool',
'subtype': 'bool[null]'}]
cols['1-d regular array'] = np.array([[1, 2], [3, 4]], dtype=np.int8)
exps['1-d regular array'] = [
{'datatype': 'string',
'name': '1-d regular array',
'subtype': 'int8[2]'}]
cols['2-d regular array'] = np.arange(8, dtype=np.float16).reshape(2, 2, 2)
exps['2-d regular array'] = [
{'datatype': 'string',
'name': '2-d regular array',
'subtype': 'float16[2,2]'}]
cols['scalar object'] = np.array([{'a': 1}, {'b': 2}], dtype=object)
exps['scalar object'] = [
{'datatype': 'string', 'name': 'scalar object', 'subtype': 'json'}]
cols['1-d object'] = np.array(
[[{'a': 1}, {'b': 2}],
[{'a': 1}, {'b': 2}]], dtype=object)
exps['1-d object'] = [
{'datatype': 'string',
'name': '1-d object',
'subtype': 'json[2]'}]
@pytest.mark.parametrize('name,col,exp',
list(zip(cols, cols.values(), exps.values())))
def test_specialized_columns(name, col, exp):
"""Test variable length lists, multidim columns, object columns.
"""
t = Table()
t[name] = col
out = StringIO()
t.write(out, format='ascii.ecsv')
hdr = _get_ecsv_header_dict(out.getvalue())
assert hdr['datatype'] == exp
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
assert np.all(val1 == val2)
def test_full_subtypes():
"""Read ECSV file created by M. Taylor that includes scalar, fixed array,
variable array columns for all datatypes. This file has missing values for all
columns, both as per-value nulls and as blank entries for the entire column
value.
Note: original file was modified to include blank values in f_float and
f_double columns.
"""
t = Table.read(os.path.join(TEST_DIR, 'data', 'subtypes.ecsv'))
colnames = ('i_index,'
's_byte,s_short,s_int,s_long,s_float,s_double,s_string,s_boolean,'
'f_byte,f_short,f_int,f_long,f_float,f_double,f_string,f_boolean,'
'v_byte,v_short,v_int,v_long,v_float,v_double,v_string,v_boolean,'
'm_int,m_double').split(',')
assert t.colnames == colnames
type_map = {'byte': 'int8',
'short': 'int16',
'int': 'int32',
'long': 'int64',
'float': 'float32',
'double': 'float64',
'string': 'str',
'boolean': 'bool'}
for col in t.itercols():
info = col.info
if info.name == 'i_index':
continue
assert isinstance(col, MaskedColumn)
type_name = info.name[2:] # short, int, etc
subtype = info.name[:1]
if subtype == 's': # Scalar
assert col.shape == (16,)
if subtype == 'f': # Fixed array
assert col.shape == (16, 3)
if subtype == 'v': # Variable array
assert col.shape == (16,)
assert info.dtype.name == 'object'
for val in col:
assert isinstance(val, np.ndarray)
assert val.dtype.name.startswith(type_map[type_name])
assert len(val) in [0, 1, 2, 3]
else:
assert info.dtype.name.startswith(type_map[type_name])
def test_masked_empty_subtypes():
"""Test blank field in subtypes. Similar to previous test but with explicit
checks of values"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: o, datatype: string, subtype: json}
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
o f v
null [0,1] [1]
"" "" ""
[1,2] [2,3] [2,3]
"""
t = Table.read(txt, format='ascii.ecsv')
assert np.all(t['o'] == np.array([None, -1, [1, 2]], dtype=object))
assert np.all(t['o'].mask == [False, True, False])
exp = np.ma.array([[0, 1], [-1, -1], [2, 3]], mask=[[0, 0], [1, 1], [0, 0]])
assert np.all(t['f'] == exp)
assert np.all(t['f'].mask == exp.mask)
assert np.all(t['v'][0] == [1])
assert np.all(t['v'][2] == [2, 3])
assert np.all(t['v'].mask == [False, True, False])
def test_masked_vals_in_array_subtypes():
"""Test null values in fixed and variable array subtypes."""
t = Table()
t['f'] = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]], dtype=np.int64)
t['v'] = np.empty(2, dtype=object)
t['v'][0] = np.ma.array([1, 2], mask=[0, 1], dtype=np.int64)
t['v'][1] = np.ma.array([3, 4, 5], mask=[1, 0, 0], dtype=np.int64)
out = StringIO()
t.write(out, format='ascii.ecsv')
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
f v
[1,null] [1,null]
[null,4] [null,4,5]
"""
hdr = _get_ecsv_header_dict(out.getvalue())
hdr_exp = _get_ecsv_header_dict(txt)
assert hdr == hdr_exp
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
assert type(t2[name]) is type(t[name]) # noqa
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
if isinstance(val1, np.ma.MaskedArray):
assert np.all(val1.mask == val2.mask)
assert np.all(val1 == val2)
def test_guess_ecsv_with_one_column():
"""Except for ECSV, guessing always requires at least 2 columns"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: col, datatype: string, description: hello}
# schema: astropy-2.0
col
1
2
"""
t = ascii.read(txt)
assert t['col'].dtype.kind == 'U' # would be int with basic format
assert t['col'].description == 'hello'
|
a5466a9a80d72b5df63bfe7499a008153617d75c60ab336a958e2c77c1a54711 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.io.ascii.core import convert_numpy
import re
from io import BytesIO, StringIO, open
from collections import OrderedDict
import locale
import platform
import pathlib
import pytest
import numpy as np
from astropy.io import ascii
from astropy.table import Table, MaskedColumn
from astropy import table
from astropy.units import Unit
from astropy.table.table_helpers import simple_table
from .common import (assert_equal, assert_almost_equal,
assert_true)
from astropy.io.ascii import core
from astropy.io.ascii.ui import _probably_html, get_read_trace
from astropy.utils.data import get_pkg_data_path
from astropy.utils.exceptions import AstropyWarning
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2 # noqa
# setup/teardown function to have the tests run in the correct directory
from .common import setup_function, teardown_function # noqa
def asciiIO(x):
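    """Return a bytes file-like object with ``x`` encoded as ASCII."""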
return BytesIO(x.encode('ascii'))
@pytest.fixture
def home_is_data(monkeypatch, request):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the data directory.
"""
path = get_pkg_data_path('data')
# For Unix
monkeypatch.setenv('HOME', path)
# For Windows
monkeypatch.setenv('USERPROFILE', path)
@pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False},
{'use_fast_converter': True}, 'force'])
def test_convert_overflow(fast_reader):
"""
Test reading an extremely large integer, which falls through to
string due to an overflow error (#2234). The C parsers used to
return inf (kind 'f') for this.
"""
expected_kind = 'U'
with pytest.warns(AstropyWarning, match="OverflowError converting to IntType in column a"):
dat = ascii.read(['a', '1' * 10000], format='basic',
fast_reader=fast_reader, guess=False)
assert dat['a'].dtype.kind == expected_kind
def test_read_specify_converters_with_names():
"""
Exact example from #9701: When using ascii.read with both the names and
converters arguments, the converters dictionary ignores the user-supplied
names and requires that you know the guessed names.
"""
csv_text = ['a,b,c', '1,2,3', '4,5,6']
names = ['A', 'B', 'C']
converters = {
'A': [ascii.convert_numpy(float)],
'B': [ascii.convert_numpy(int)],
'C': [ascii.convert_numpy(str)]
}
t = ascii.read(csv_text, format='csv', names=names, converters=converters)
assert t['A'].dtype.kind == 'f'
assert t['B'].dtype.kind == 'i'
assert t['C'].dtype.kind == 'U'
def test_read_remove_and_rename_columns():
csv_text = ['a,b,c', '1,2,3', '4,5,6']
reader = ascii.get_reader(Reader=ascii.Csv)
reader.read(csv_text)
header = reader.header
with pytest.raises(KeyError, match='Column NOT-EXIST does not exist'):
header.remove_columns(['NOT-EXIST'])
header.remove_columns(['c'])
assert header.colnames == ('a', 'b')
header.rename_column('a', 'aa')
assert header.colnames == ('aa', 'b')
with pytest.raises(KeyError, match='Column NOT-EXIST does not exist'):
header.rename_column('NOT-EXIST', 'aa')
def test_guess_with_names_arg():
"""
Make sure reading a table with guess=True gives the expected result when
the names arg is specified.
"""
# This is a NoHeader format table and so `names` should replace
# the default col0, col1 names. It fails as a Basic format
# table when guessing because the column names would be '1', '2'.
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'))
assert len(dat) == 2
assert dat.colnames == ['a', 'b']
# This is a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(['c,d', '3,4'], names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
# This is also a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(['c d', 'e f'], names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
def test_guess_with_format_arg():
"""
    When the format or Reader is explicitly given, disable the strict
    column-name checking in guessing.
"""
dat = ascii.read(['1,2', '3,4'], format='basic')
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic')
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
# For good measure check the same in the unified I/O interface
dat = Table.read(['1,2', '3,4'], format='ascii.basic')
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
def test_guess_with_delimiter_arg():
"""
    When the delimiter is explicitly given, do not try other delimiters in guessing.
"""
fields = ['10.1E+19', '3.14', '2048', '-23']
values = [1.01e20, 3.14, 2048, -23]
# Default guess should recognise CSV with optional spaces
t0 = ascii.read(asciiIO(', '.join(fields)), guess=True)
for n, v in zip(t0.colnames, values):
assert t0[n][0] == v
# Forcing space as delimiter produces type str columns ('10.1E+19,')
t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ')
for n, v in zip(t1.colnames[:-1], fields[:-1]):
assert t1[n][0] == v + ','
def test_reading_mixed_delimiter_tabs_spaces():
# Regression test for https://github.com/astropy/astropy/issues/6770
dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc'))
assert len(dat) == 2
    dat = Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header',
                     names=['a', 'b', 'c'])
assert len(dat) == 2
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_with_names_arg(fast_reader):
"""
Test that a bad value of `names` raises an exception.
"""
# CParser only uses columns in `names` and thus reports mismatch in num_col
with pytest.raises(ascii.InconsistentTableError):
ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
@pytest.mark.parametrize('path_format', ['plain', 'tilde-str', 'tilde-pathlib'])
def test_read_all_files(fast_reader, path_format, home_is_data):
for testfile in get_testfiles():
if testfile.get('skip'):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
if 'tilde' in path_format:
if 'str' in path_format:
testfile['name'] = '~/' + testfile['name'][5:]
else:
testfile['name'] = pathlib.Path('~/', testfile['name'][5:])
print(f"\n\n******** READING {testfile['name']}")
for guess in (True, False):
test_opts = testfile['opts'].copy()
if 'guess' not in test_opts:
test_opts['guess'] = guess
if ('Reader' in test_opts and f"fast_{test_opts['Reader']._format_name}"
in core.FAST_CLASSES): # has fast version
if 'Inputter' not in test_opts: # fast reader doesn't allow this
test_opts['fast_reader'] = fast_reader
table = ascii.read(testfile['name'], **test_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
@pytest.mark.parametrize('path_format', ['plain', 'tilde-str', 'tilde-pathlib'])
def test_read_all_files_via_table(fast_reader, path_format, home_is_data):
for testfile in get_testfiles():
if testfile.get('skip'):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
if 'tilde' in path_format:
if 'str' in path_format:
testfile['name'] = '~/' + testfile['name'][5:]
else:
testfile['name'] = pathlib.Path('~/', testfile['name'][5:])
print(f"\n\n******** READING {testfile['name']}")
for guess in (True, False):
test_opts = testfile['opts'].copy()
if 'guess' not in test_opts:
test_opts['guess'] = guess
if 'Reader' in test_opts:
format = f"ascii.{test_opts['Reader']._format_name}"
del test_opts['Reader']
else:
format = 'ascii'
if f'fast_{format}' in core.FAST_CLASSES:
test_opts['fast_reader'] = fast_reader
table = Table.read(testfile['name'], format=format, **test_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
def test_guess_all_files():
for testfile in get_testfiles():
if testfile.get('skip'):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
if not testfile['opts'].get('guess', True):
continue
print(f"\n\n******** READING {testfile['name']}")
for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []):
# Copy read options except for those in filter_read_opts
guess_opts = dict((k, v) for k, v in testfile['opts'].items()
if k not in filter_read_opts)
table = ascii.read(testfile['name'], guess=True, **guess_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
def test_validate_read_kwargs():
lines = ['a b', '1 2', '3 4']
# Check that numpy integers are allowed
out = ascii.read(lines, data_start=np.int16(2))
assert np.all(out['a'] == [3])
with pytest.raises(TypeError, match=r"read\(\) argument 'data_end' must be a "
r"<class 'int'> object, "
r"got <class 'str'> instead"):
ascii.read(lines, data_end='needs integer')
with pytest.raises(TypeError, match=r"read\(\) argument 'fill_include_names' must "
r"be a list-like object, got <class 'str'> instead"):
ascii.read(lines, fill_include_names='ID')
def test_daophot_indef():
"""Test that INDEF is correctly interpreted as a missing value"""
table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot)
for col in table.itercols():
# Four columns have all INDEF values and are masked, rest are normal Column
if col.name in ('OTIME', 'MAG', 'MERR', 'XAIRMASS'):
assert np.all(col.mask)
else:
assert not hasattr(col, 'mask')
def test_daophot_types():
"""
Test specific data types which are different from what would be
inferred automatically based only data values. DAOphot reader uses
the header information to assign types.
"""
table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot)
assert table['LID'].dtype.char in 'fd' # float or double
assert table['MAG'].dtype.char in 'fd' # even without any data values
assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int)
assert table['ID'].dtype.char in 'il' # int or long
def test_daophot_header_keywords():
table = ascii.read('data/daophot.dat', Reader=ascii.Daophot)
expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'),
('REJFILE', '"hello world"', 'filename', '%-23s'),
('SCALE', '1.', 'units/pix', '%-23.7g'),)
keywords = table.meta['keywords'] # Ordered dict of keyword structures
for name, value, units, format_ in expected_keywords:
keyword = keywords[name]
assert_equal(keyword['value'], value)
assert_equal(keyword['units'], units)
assert_equal(keyword['format'], format_)
def test_daophot_multiple_aperture():
table = ascii.read('data/daophot3.dat', Reader=ascii.Daophot)
assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names
assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file
assert table['MERR2'][0] == 1.171
assert np.all(table['RAPERT5'] == 23.3) # assert all the 5th apertures are same 23.3
def test_daophot_multiple_aperture2():
table = ascii.read('data/daophot4.dat', Reader=ascii.Daophot)
assert 'MAG15' in table.colnames # MAG15 is one of the newly created column name
assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file
assert table['MERR2'][0] == 0.049
assert np.all(table['RAPERT5'] == 5.) # assert all the 5th apertures are same 5.0
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_empty_table_no_header(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/no_data_without_header.dat', Reader=ascii.NoHeader,
guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_wrong_quote(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/simple.txt', guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/bad.txt', fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col2(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/simple5.txt', delimiter='|', fast_reader=fast_reader)
def test_missing_file():
with pytest.raises(OSError):
ascii.read('does_not_exist')
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_names(fast_reader):
names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
data = ascii.read('data/simple3.txt', names=names, delimiter='|',
fast_reader=fast_reader)
assert_equal(data.dtype.names, names)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_include_names(fast_reader):
names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
include_names = ('c1', 'c3')
data = ascii.read('data/simple3.txt', names=names, include_names=include_names,
delimiter='|', fast_reader=fast_reader)
assert_equal(data.dtype.names, include_names)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_exclude_names(fast_reader):
exclude_names = ('Y', 'object')
data = ascii.read('data/simple3.txt', exclude_names=exclude_names, delimiter='|',
fast_reader=fast_reader)
assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad'))
def test_include_names_daophot():
include_names = ('ID', 'MAG', 'PIER')
data = ascii.read('data/daophot.dat', include_names=include_names)
assert_equal(data.dtype.names, include_names)
def test_exclude_names_daophot():
exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR')
data = ascii.read('data/daophot.dat', exclude_names=exclude_names)
assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER'))
def test_custom_process_lines():
def process_lines(lines):
bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE)
striplines = (x.strip() for x in lines)
return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0]
reader = ascii.get_reader(delimiter='|')
reader.inputter.process_lines = process_lines
data = reader.read('data/bars_at_ends.txt')
assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'))
assert_equal(len(data), 3)
def test_custom_process_line():
def process_line(line):
line_out = re.sub(r'^\|\s*', '', line.strip())
return line_out
reader = ascii.get_reader(data_start=2, delimiter='|')
reader.header.splitter.process_line = process_line
reader.data.splitter.process_line = process_line
data = reader.read('data/nls1_stackinfo.dbout')
cols = get_testfiles('data/nls1_stackinfo.dbout')['cols']
assert_equal(data.dtype.names, cols[1:])
def test_custom_splitters():
reader = ascii.get_reader()
reader.header.splitter = ascii.BaseSplitter()
reader.data.splitter = ascii.BaseSplitter()
f = 'data/test4.dat'
data = reader.read(f)
testfile = get_testfiles(f)
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091)
assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704)
assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148)
assert_equal(data.field('statname')[2], 'chi2modvar')
assert_almost_equal(data.field('statval')[2], 497.56468441)
def test_start_end():
data = ascii.read('data/test5.dat', header_start=1, data_start=3, data_end=-5)
assert_equal(len(data), 13)
assert_equal(data.field('statname')[0], 'chi2xspecvar')
assert_equal(data.field('statname')[-1], 'chi2gehrels')
def test_set_converters():
converters = {'zabs1.nh': [ascii.convert_numpy('int32'),
ascii.convert_numpy('float32')],
'p1.gamma': [ascii.convert_numpy('str')]
}
data = ascii.read('data/test4.dat', converters=converters)
assert_equal(str(data['zabs1.nh'].dtype), 'float32')
assert_equal(data['p1.gamma'][0], '1.26764500000')
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_string(fast_reader):
f = 'data/simple.txt'
with open(f) as fd:
table = fd.read()
testfile = get_testfiles(f)[0]
data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_filelike(fast_reader):
f = 'data/simple.txt'
testfile = get_testfiles(f)[0]
with open(f, 'rb') as fd:
data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_lines(fast_reader):
f = 'data/simple.txt'
with open(f) as fd:
table = fd.readlines()
testfile = get_testfiles(f)[0]
data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
def test_comment_lines():
table = ascii.get_reader(Reader=ascii.Rdb)
data = table.read('data/apostrophe.rdb')
assert_equal(table.comment_lines, ['# first comment', ' # second comment'])
assert_equal(data.meta['comments'], ['first comment', 'second comment'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
**testfile['opts'])
assert_true((data['a'].mask == [False, True]).all())
assert_true((data['a'] == [1, 1]).all())
assert_true((data['b'].mask == [False, True]).all())
assert_true((data['b'] == [2, 1]).all())
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_col(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader,
**testfile['opts'])
check_fill_values(data)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_include_names(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
fill_include_names=['b'], **testfile['opts'])
check_fill_values(data)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_exclude_names(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
fill_exclude_names=['a'], **testfile['opts'])
check_fill_values(data)
def check_fill_values(data):
"""compare array column by column with expectation """
assert not hasattr(data['a'], 'mask')
assert_true((data['a'] == ['1', 'a']).all())
assert_true((data['b'].mask == [False, True]).all())
# Check that masked value is "do not care" in comparison
assert_true((data['b'] == [2, -999]).all())
data['b'].mask = False # explicitly unmask for comparison
assert_true((data['b'] == [2, 1]).all())
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_list(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')],
fast_reader=fast_reader, **testfile['opts'])
data['a'].mask = False # explicitly unmask for comparison
assert_true((data['a'] == [42, 42]).all())
def test_masking_Cds_Mrt():
f = 'data/cds.dat' # Tested for CDS and MRT
for testfile in get_testfiles(f):
data = ascii.read(f,
**testfile['opts'])
assert_true(data['AK'].mask[0])
assert not hasattr(data['Fit'], 'mask')
def test_null_Ipac():
f = 'data/ipac.dat'
testfile = get_testfiles(f)[0]
data = ascii.read(f, **testfile['opts'])
mask = np.array([(True, False, True, False, True),
(False, False, False, False, False)],
dtype=[('ra', '|b1'),
('dec', '|b1'),
('sai', '|b1'),
('v2', '|b1'),
('sptype', '|b1')])
assert np.all(data.mask == mask)
def test_Ipac_meta():
keywords = OrderedDict((('intval', 1),
('floatval', 2.3e3),
('date', "Wed Sp 20 09:48:36 1995"),
('key_continue', 'IPAC keywords can continue across lines')))
comments = ['This is an example of a valid comment']
f = 'data/ipac.dat'
testfile = get_testfiles(f)[0]
data = ascii.read(f, **testfile['opts'])
assert data.meta['keywords'].keys() == keywords.keys()
for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()):
assert data_kv['value'] == kv
assert data.meta['comments'] == comments
def test_set_guess_kwarg():
"""Read a file using guess with one of the typical guess_kwargs explicitly set."""
data = ascii.read('data/space_delim_no_header.dat',
delimiter=',', guess=True)
    assert data.dtype.names == ('1 3.4 hello',)
    assert len(data) == 1
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_rdb_wrong_type(fast_reader):
"""Read RDB data with inconsistent data type (except failure)"""
table = """col1\tcol2
N\tN
1\tHello"""
with pytest.raises(ValueError):
ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_default_missing(fast_reader):
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = '\n'.join(['a,b,c,d',
'1,3,,',
'2, , 4.0 , ss '])
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 -- --',
' 2 -- 4.0 ss']
# Single row table with a single missing element
table = """ a \n "" """
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.pformat() == [' a ',
'---',
' --']
assert dat['a'].dtype.kind == 'i'
# Same test with a fixed width reader
table = '\n'.join([' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss'])
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 -- --',
' 2 -- 4.0 ss']
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss']
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[])
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss']
def get_testfiles(name=None):
"""Set up information about the columns, number of rows, and reader params to
read a bunch of test files and verify columns and number of rows."""
testfiles = [
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/apostrophe.rdb',
'nrows': 2,
'opts': {'Reader': ascii.Rdb}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/apostrophe.tab',
'nrows': 2,
'opts': {'Reader': ascii.Tab}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/cds.dat',
'nrows': 1,
'opts': {'Reader': ascii.Cds}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/cds.dat',
'nrows': 1,
'opts': {'Reader': ascii.Mrt}},
# Test malformed CDS file (issues #2241 #467)
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/cds_malformed.dat',
'nrows': 1,
'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}},
{'cols': ('a', 'b', 'c'),
'name': 'data/commented_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.CommentedHeader}},
{'cols': ('a', 'b', 'c'),
'name': 'data/commented_header2.dat',
'nrows': 2,
'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}},
{'cols': ('col1', 'col2', 'col3', 'col4', 'col5'),
'name': 'data/continuation.dat',
'nrows': 2,
'opts': {'Inputter': ascii.ContinuationLinesInputter,
'Reader': ascii.NoHeader}},
{'cols': ('ID',
'XCENTER',
'YCENTER',
'MAG',
'MERR',
'MSKY',
'NITER',
'SHARPNESS',
'CHI',
'PIER',
'PERROR'),
'name': 'data/daophot.dat',
'nrows': 2,
'opts': {'Reader': ascii.Daophot}},
{'cols': ('NUMBER',
'FLUX_ISO',
'FLUXERR_ISO',
'VALU-ES',
'VALU-ES_1',
'FLAG'),
'name': 'data/sextractor.dat',
'nrows': 3,
'opts': {'Reader': ascii.SExtractor}},
{'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
'name': 'data/ipac.dat',
'nrows': 2,
'opts': {'Reader': ascii.Ipac}},
{'cols': ('col0',
'objID',
'osrcid',
'xsrcid',
'SpecObjID',
'ra',
'dec',
'obsid',
'ccdid',
'z',
'modelMag_i',
'modelMagErr_i',
'modelMag_r',
'modelMagErr_r',
'expo',
'theta',
'rad_ecf_39',
'detlim90',
'fBlim90'),
'name': 'data/nls1_stackinfo.dbout',
'nrows': 58,
'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/no_data_cds.dat',
'nrows': 0,
'opts': {'Reader': ascii.Cds}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/no_data_cds.dat',
'nrows': 0,
'opts': {'Reader': ascii.Mrt}},
{'cols': ('ID',
'XCENTER',
'YCENTER',
'MAG',
'MERR',
'MSKY',
'NITER',
'SHARPNESS',
'CHI',
'PIER',
'PERROR'),
'name': 'data/no_data_daophot.dat',
'nrows': 0,
'opts': {'Reader': ascii.Daophot}},
{'cols': ('NUMBER',
'FLUX_ISO',
'FLUXERR_ISO',
'VALUES',
'VALUES_1',
'FLAG'),
'name': 'data/no_data_sextractor.dat',
'nrows': 0,
'opts': {'Reader': ascii.SExtractor}},
{'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
'name': 'data/no_data_ipac.dat',
'nrows': 0,
'opts': {'Reader': ascii.Ipac}},
{'cols': ('ra', 'v2'),
'name': 'data/ipac.dat',
'nrows': 2,
'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}},
{'cols': ('a', 'b', 'c'),
'name': 'data/no_data_with_header.dat',
'nrows': 0,
'opts': {}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/short.rdb',
'nrows': 7,
'opts': {'Reader': ascii.Rdb}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/short.tab',
'nrows': 7,
'opts': {'Reader': ascii.Tab}},
{'cols': ('test 1a', 'test2', 'test3', 'test4'),
'name': 'data/simple.txt',
'nrows': 2,
'opts': {'quotechar': "'"}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 'data/simple.txt',
'nrows': 1,
'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 'data/simple.txt',
'nrows': 1,
'opts': {'quotechar': "'", 'header_start': 1}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 'data/simple.txt',
'nrows': 2,
'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}},
{'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
'name': 'data/simple2.txt',
'nrows': 3,
'opts': {'delimiter': '|'}},
{'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
'name': 'data/simple3.txt',
'nrows': 2,
'opts': {'delimiter': '|'}},
{'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'),
'name': 'data/simple4.txt',
'nrows': 3,
'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}},
{'cols': ('col1', 'col2', 'col3'),
'name': 'data/space_delim_no_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.NoHeader}},
{'cols': ('col1', 'col2', 'col3'),
'name': 'data/space_delim_no_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.NoHeader, 'header_start': None}},
{'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'),
'name': 'data/space_delim_blank_lines.txt',
'nrows': 3,
'opts': {}},
{'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'),
'name': 'data/test4.dat',
'nrows': 9,
'opts': {}},
{'cols': ('a', 'b', 'c'),
'name': 'data/fill_values.txt',
'nrows': 2,
'opts': {'delimiter': ','}},
{'name': 'data/whitespace.dat',
'cols': ('quoted colname with tab\tinside', 'col2', 'col3'),
'nrows': 2,
'opts': {'delimiter': r'\s'}},
{'name': 'data/simple_csv.csv',
'cols': ('a', 'b', 'c'),
'nrows': 2,
'opts': {'Reader': ascii.Csv}},
{'name': 'data/simple_csv_missing.csv',
'cols': ('a', 'b', 'c'),
'nrows': 2,
'skip': True,
'opts': {'Reader': ascii.Csv}},
{'cols': ('cola', 'colb', 'colc'),
'name': 'data/latex1.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Facility', 'Id', 'exposure', 'date'),
'name': 'data/latex2.tex',
'nrows': 3,
'opts': {'Reader': ascii.AASTex}},
{'cols': ('cola', 'colb', 'colc'),
'name': 'data/latex3.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Col1', 'Col2', 'Col3', 'Col4'),
'name': 'data/fixed_width_2_line.txt',
'nrows': 2,
'opts': {'Reader': ascii.FixedWidthTwoLine}},
]
try:
import bs4 # noqa
testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'),
'name': 'data/html.html',
'nrows': 3,
'opts': {'Reader': ascii.HTML}})
except ImportError:
pass
if name is not None:
# If there are multiple matches then return a list, else return just
# the one match.
out = [x for x in testfiles if x['name'] == name]
if len(out) == 1:
out = out[0]
else:
out = testfiles
return out
def test_header_start_exception():
    '''Check that certain Readers throw an exception if ``header_start`` is set.
    For certain Readers it does not make sense to set ``header_start``; they
    throw an exception if you try.
This was implemented in response to issue #885.
'''
for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac,
ascii.BaseReader, ascii.FixedWidthNoHeader,
ascii.Cds, ascii.Mrt, ascii.Daophot]:
with pytest.raises(ValueError):
ascii.core._get_reader(readerclass, header_start=5)
def test_csv_table_read():
"""
Check for a regression introduced by #1935. Pseudo-CSV file with
commented header line.
"""
lines = ['# a, b',
'1, 2',
'3, 4']
t = ascii.read(lines)
assert t.colnames == ['a', 'b']
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_overlapping_names(fast_reader):
"""
Check that the names argument list can overlap with the existing column names.
This tests the issue in #1991.
"""
t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader)
assert t.colnames == ['b', 'a']
def test_sextractor_units():
"""
Make sure that the SExtractor reader correctly inputs descriptions and units.
"""
table = ascii.read('data/sextractor2.dat', Reader=ascii.SExtractor, guess=False)
expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'),
Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'),
Unit('mag * arcsec**(-2)')]
expected_descrs = ['Running object number',
'Windowed position estimate along x',
'Windowed position estimate along y',
'Kron-like elliptical aperture magnitude',
'RMS error for AUTO magnitude',
'Extraction flags',
None,
'Barycenter position along MAMA x axis',
'Peak surface brightness above background']
for i, colname in enumerate(table.colnames):
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_sextractor_last_column_array():
"""
Make sure that the SExtractor reader handles the last column correctly when it is array-like.
"""
table = ascii.read('data/sextractor3.dat', Reader=ascii.SExtractor, guess=False)
expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000',
'MAG_AUTO', 'MAGERR_AUTO',
'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3',
'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6',
'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3',
'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6']
expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'),
Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag')]
expected_descrs = ['Object position along x', None,
'Right ascension of barycenter (J2000)',
'Declination of barycenter (J2000)',
'Kron-like elliptical aperture magnitude',
'RMS error for AUTO magnitude', ] + [
'Fixed aperture magnitude vector'] * 7 + [
'RMS error vector for fixed aperture mag.'] * 7
for i, colname in enumerate(table.colnames):
assert table[colname].name == expected_columns[i]
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_list_with_newlines():
"""
Check that lists of strings where some strings consist of just a newline
("\n") are parsed correctly.
"""
t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"])
assert t.colnames == ['abc']
assert len(t) == 2
assert t[0][0] == 123
assert t[1][0] == 456
def test_commented_csv():
"""
    Check that the Csv reader does not ignore lines starting with the # comment
    character, which is defined as a comment for most Basic readers.
"""
t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv')
assert t.colnames == ['#a', 'b']
assert len(t) == 2
assert t['#a'][1] == '#3'
def test_meta_comments():
"""
Make sure that line comments are included in the ``meta`` attribute
of the output Table.
"""
t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3'])
assert t.colnames == ['a', 'b', 'c']
assert t.meta['comments'] == ['comment1', 'comment2']
def test_guess_fail():
"""
Check the error message when guess fails
"""
with pytest.raises(ascii.InconsistentTableError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic')
assert "** To figure out why the table did not read, use guess=False and" in str(err.value)
# Test the case with guessing enabled but for a format that has no free params
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='ipac')
assert 'At least one header line beginning and ending with delimiter required' in str(err.value)
# Test the case with guessing enabled but with all params specified
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic',
quotechar='"', delimiter=' ', fast_reader=False)
assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value)
@pytest.mark.xfail('not HAS_BZ2')
def test_guessing_file_object():
"""
Test guessing a file object. Fixes #3013 and similar issue noted in #3019.
"""
with open('data/ipac.dat.bz2', 'rb') as fd:
t = ascii.read(fd)
assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype']
def test_pformat_roundtrip():
"""Check that the screen output of ``print tab`` can be read. See #3025."""
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = '\n'.join(['a,b,c,d',
'1,3,1.11,1',
'2, 2, 4.0 , ss '])
dat = ascii.read(table)
out = ascii.read(dat.pformat())
assert len(dat) == len(out)
assert dat.colnames == out.colnames
for c in dat.colnames:
assert np.all(dat[c] == out[c])
def test_ipac_abbrev():
lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|',
'| r | rE | rea | real | D | do | dou | f | i | l | da| c |',
' 1 2 3 4 5 6 7 8 9 10 11 12 ']
dat = ascii.read(lines, format='ipac')
for name in dat.columns[0:8]:
assert dat[name].dtype.kind == 'f'
for name in dat.columns[8:10]:
assert dat[name].dtype.kind == 'i'
for name in dat.columns[10:12]:
assert dat[name].dtype.kind in ('U', 'S')
def test_almost_but_not_quite_daophot():
'''Regression test for #3319.
    This table looks so close to a daophot table that the daophot reader gets
    quite far before it fails with an AttributeError.
Note that this table will actually be read as Commented Header table with
the columns ['some', 'header', 'info'].
'''
lines = ["# some header info",
"#F header info beginning with 'F'",
"1 2 3",
"4 5 6",
"7 8 9"]
dat = ascii.read(lines)
assert len(dat) == 3
@pytest.mark.parametrize('fast', [False, 'force'])
def test_commented_header_comments(fast):
"""
Test that comments in commented_header are as expected with header_start
at different positions, and that the table round-trips.
"""
comments = ['comment 1', 'comment 2', 'comment 3']
lines = ['# a b',
'# comment 1',
'# comment 2',
'# comment 3',
'1 2',
'3 4']
dat = ascii.read(lines, format='commented_header', fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
out = StringIO()
ascii.write(dat, out, format='commented_header', fast_writer=fast)
assert out.getvalue().splitlines() == lines
lines.insert(1, lines.pop(0))
dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines.insert(2, lines.pop(1))
dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines.insert(3, lines.pop(2))
dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines = ['# a b',
'1 2',
'3 4']
dat = ascii.read(lines, format='commented_header', fast_reader=fast)
assert 'comments' not in dat.meta
assert dat.colnames == ['a', 'b']
def test_probably_html(home_is_data):
"""
Test the routine for guessing if a table input to ascii.read is probably HTML
"""
for tabl0 in ('data/html.html',
'~/html.html',
'http://blah.com/table.html',
'https://blah.com/table.html',
'file://blah/table.htm',
'ftp://blah.com/table.html',
'file://blah.com/table.htm',
' <! doctype html > hello world',
'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk',
['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],
(' <! doctype html > ', ' hello world'),
):
assert _probably_html(tabl0) is True
for tabl0 in ('data/html.htms',
'Xhttp://blah.com/table.html',
' https://blah.com/table.htm',
'fole://blah/table.htm',
' < doctype html > hello world',
'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk',
['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],
(' <! doctype htm > ', ' hello world'),
[[1, 2, 3]],
):
assert _probably_html(tabl0) is False
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_data_header_start(fast_reader):
tests = [(['# comment',
'',
' ',
'skip this line', # line 0
'a b', # line 1
'1 2'], # line 2
[{'header_start': 1},
{'header_start': 1, 'data_start': 2}
]
),
(['# comment',
'',
' \t',
'skip this line', # line 0
'a b', # line 1
'',
' \t',
'skip this line', # line 2
'1 2'], # line 3
[{'header_start': 1, 'data_start': 3}]),
(['# comment',
'',
' ',
'a b', # line 0
'',
' ',
'skip this line', # line 1
'1 2'], # line 2
[{'header_start': 0, 'data_start': 2},
{'data_start': 2}])]
for lines, kwargs_list in tests:
for kwargs in kwargs_list:
t = ascii.read(lines, format='basic', fast_reader=fast_reader,
guess=True, **kwargs)
assert t.colnames == ['a', 'b']
assert len(t) == 1
assert np.all(t['a'] == [1])
# Sanity check that the expected Reader is being used
assert get_read_trace()[-1]['kwargs']['Reader'] is (
ascii.Basic if (fast_reader is False) else ascii.FastBasic)
def test_table_with_no_newline():
"""
Test that an input file which is completely empty fails in the expected way.
Test that an input file with one line but no newline succeeds.
"""
# With guessing
table = BytesIO()
with pytest.raises(ascii.InconsistentTableError):
ascii.read(table)
# Without guessing
table = BytesIO()
with pytest.raises(ValueError) as err:
ascii.read(table, guess=False, fast_reader=False, format='basic')
assert 'No header line found' in str(err.value)
table = BytesIO()
t = ascii.read(table, guess=False, fast_reader=True, format='fast_basic')
assert not t and t.as_array().size == 0
# Put a single line of column names but with no newline
for kwargs in [dict(),
dict(guess=False, fast_reader=False, format='basic'),
dict(guess=False, fast_reader=True, format='fast_basic')]:
table = BytesIO()
table.write(b'a b')
t = ascii.read(table, **kwargs)
assert t.colnames == ['a', 'b']
assert len(t) == 0
def test_path_object():
fpath = pathlib.Path('data/simple.txt')
data = ascii.read(fpath)
assert len(data) == 2
assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4']
assert data['test2'][1] == 'hat2'
def test_column_conversion_error():
"""
Test that context information (upstream exception message) from column
conversion error is provided.
"""
ipac = """\
| col0 |
| double |
1 2
"""
with pytest.raises(ValueError) as err:
ascii.read(ipac, guess=False, format='ipac')
assert 'Column col0 failed to convert:' in str(err.value)
with pytest.raises(ValueError) as err:
ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []})
assert 'no converters' in str(err.value)
def test_non_C_locale_with_fast_reader():
"""Test code that forces "C" locale while calling fast reader (#4364)"""
current = locale.setlocale(locale.LC_ALL)
try:
if platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, 'fr_FR')
else:
locale.setlocale(locale.LC_ALL, 'fr_FR.utf8')
for fast_reader in (True,
False,
{'use_fast_converter': False},
{'use_fast_converter': True}):
t = ascii.read(['a b', '1.5 2'], format='basic', guess=False,
fast_reader=fast_reader)
assert t['a'].dtype.kind == 'f'
except locale.Error as e:
pytest.skip(f'Locale error: {e}')
finally:
locale.setlocale(locale.LC_ALL, current)
def test_no_units_for_char_columns():
'''Test that a char column of a Table is assigned no unit and not
a dimensionless unit.'''
t1 = Table([["A"]], names="B")
out = StringIO()
ascii.write(t1, out, format="ipac")
t2 = ascii.read(out.getvalue(), format="ipac", guess=False)
assert t2["B"].unit is None
def test_initial_column_fill_values():
"""Regression test for #5336, #5338."""
class TestHeader(ascii.BasicHeader):
def _set_cols_from_names(self):
self.cols = [ascii.Column(name=x) for x in self.names]
# Set some initial fill values
for col in self.cols:
col.fill_values = {'--': '0'}
class Tester(ascii.Basic):
header_class = TestHeader
reader = ascii.get_reader(Reader=Tester)
assert reader.read("""# Column definition is the first uncommented line
# Default delimiter is the space character.
a b c
# Data starts after the header column definition, blank lines ignored
-- 2 3
4 5 6 """)['a'][0] is np.ma.masked
def test_latex_no_trailing_backslash():
"""
Test that latex/aastex file with no trailing backslash can be read.
"""
lines = r"""
\begin{table}
\begin{tabular}{ccc}
a & b & c \\
1 & 1.0 & c \\ % comment
3\% & 3.0 & e % comment
\end{tabular}
\end{table}
"""
dat = ascii.read(lines, format='latex')
assert dat.colnames == ['a', 'b', 'c']
assert np.all(dat['a'] == ['1', r'3\%'])
assert np.all(dat['c'] == ['c', 'e'])
def test_aastex_no_trailing_backslash():
lines = r"""
\begin{deluxetable}{ccc}
\tablehead{\colhead{a} & \colhead{b} & \colhead{c}}
\startdata
1 & 1.0 & c \\
2 & 2.0 & d \\ % comment
3\% & 3.0 & e % comment
\enddata
\end{deluxetable}
"""
dat = ascii.read(lines, format='aastex')
assert dat.colnames == ['a', 'b', 'c']
    assert np.all(dat['a'] == ['1', '2', r'3\%'])
    assert np.all(dat['c'] == ['c', 'd', 'e'])
@pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252'])
def test_read_with_encoding(tmpdir, encoding):
data = {
'commented_header': '# à b è \n 1 2 héllo',
'csv': 'à,b,è\n1,2,héllo'
}
testfile = str(tmpdir.join('test.txt'))
for fmt, content in data.items():
with open(testfile, 'w', encoding=encoding) as f:
f.write(content)
table = ascii.read(testfile, encoding=encoding)
assert table.pformat() == [' à b è ',
'--- --- -----',
' 1 2 héllo']
for guess in (True, False):
table = ascii.read(testfile, format=fmt, fast_reader=False,
encoding=encoding, guess=guess)
assert table['è'].dtype.kind == 'U'
assert table.pformat() == [' à b è ',
'--- --- -----',
' 1 2 héllo']
def test_unsupported_read_with_encoding(tmpdir):
# Fast reader is not supported, make sure it raises an exception
with pytest.raises(ascii.ParameterError):
ascii.read('data/simple3.txt', guess=False, fast_reader='force',
encoding='latin1', format='fast_csv')
def test_read_chunks_input_types():
"""
Test chunked reading for different input types: file path, file object,
and string input.
"""
fpath = 'data/test5.dat'
    t1 = ascii.read(fpath, header_start=1, data_start=3)
with open(fpath, 'r') as fd1, open(fpath, 'r') as fd2:
for fp in (fpath, fd1, fd2.read()):
t_gen = ascii.read(fp, header_start=1, data_start=3,
guess=False, format='fast_basic',
fast_reader={'chunk_size': 400, 'chunk_generator': True})
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) == 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
with open(fpath, 'r') as fd1, open(fpath, 'r') as fd2:
for fp in (fpath, fd1, fd2.read()):
# Now read the full table in chunks
t3 = ascii.read(fp, header_start=1, data_start=3,
fast_reader={'chunk_size': 300})
assert np.all(t1 == t3)
@pytest.mark.parametrize('masked', [True, False])
def test_read_chunks_formats(masked):
"""
Test different supported formats for chunked reading.
"""
t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked)
for i, name in enumerate(t1.colnames):
t1.rename_column(name, f'col{i + 1}')
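    # Rename to generic col1..colN so that headerless formats (e.g. 'no_header')
    # round-trip with matching column names.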
    # TODO: commented_header does not currently work due to the special-cased
    # implementation of header parsing.
for format in 'tab', 'csv', 'no_header', 'rdb', 'basic':
out = StringIO()
ascii.write(t1, out, format=format)
t_gen = ascii.read(out.getvalue(), format=format,
fast_reader={'chunk_size': 400, 'chunk_generator': True})
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) > 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
# Now read the full table in chunks
t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400})
assert np.all(t1 == t3)
def test_read_chunks_chunk_size_too_small():
fpath = 'data/test5.dat'
with pytest.raises(ValueError) as err:
ascii.read(fpath, header_start=1, data_start=3,
fast_reader={'chunk_size': 10})
assert 'no newline found in chunk (chunk_size too small?)' in str(err.value)
def test_read_chunks_table_changes():
"""Column changes type or size between chunks. This also tests the case with
no final newline.
"""
col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50
table = '\n'.join(col)
t1 = ascii.read(table, guess=False)
t2 = ascii.read(table, fast_reader={'chunk_size': 100})
# This also confirms that the dtypes are exactly the same, i.e.
# the string itemsizes are the same.
assert np.all(t1 == t2)
def test_read_non_ascii():
"""Test that pure-Python reader is used in case the file contains non-ASCII characters
in it.
"""
table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv')
assert np.all(table['col1'] == ['\u2119', '1'])
assert np.all(table['col2'] == ['\u01b4', '2'])
@pytest.mark.parametrize('enable', [True, False, 'force'])
def test_kwargs_dict_guess(enable):
"""Test that fast_reader dictionary is preserved through guessing sequence.
"""
# Fails for enable=(True, 'force') - #5578
ascii.read('a\tb\n 1\t2\n3\t 4.0', fast_reader=dict(enable=enable))
assert get_read_trace()[-1]['kwargs']['Reader'] is (
ascii.Tab if (enable is False) else ascii.FastTab)
for k in get_read_trace():
if not k.get('status', 'Disabled').startswith('Disabled'):
assert k.get('kwargs').get('fast_reader').get('enable') is enable
def _get_lines(rdb):
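    """Return a small table with duplicate column names, optionally in RDB form
    (tab-delimited with a type specification line)."""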
lines = ['a a_2 a_1 a a']
if rdb:
lines += ['N N N N N']
lines += ['1 2 3 4 5', '10 20 30 40 50']
if rdb:
lines = ['\t'.join(line.split()) for line in lines]
return lines
@pytest.mark.parametrize('rdb', [False, True])
@pytest.mark.parametrize('fast_reader', [False, 'force'])
def test_deduplicate_names_basic(rdb, fast_reader):
"""Test that duplicate column names are successfully de-duplicated for the
basic format. Skip the case of rdb=True and fast_reader='force' when selecting
include_names, since that fails and is tested below.
"""
lines = _get_lines(rdb)
dat = ascii.read(lines, fast_reader=fast_reader)
assert dat.colnames == ['a', 'a_2', 'a_1', 'a_3', 'a_4']
assert len(dat) == 2
dat = ascii.read(lines, fast_reader=fast_reader, include_names=['a', 'a_2', 'a_3'])
assert len(dat) == 2
assert dat.colnames == ['a', 'a_2', 'a_3']
assert np.all(dat['a'] == [1, 10])
assert np.all(dat['a_2'] == [2, 20])
assert np.all(dat['a_3'] == [4, 40])
dat = ascii.read(lines, fast_reader=fast_reader,
names=['b1', 'b2', 'b3', 'b4', 'b5'],
include_names=['b1', 'b2', 'a_4', 'b4'])
assert len(dat) == 2
assert dat.colnames == ['b1', 'b2', 'b4']
assert np.all(dat['b1'] == [1, 10])
assert np.all(dat['b2'] == [2, 20])
assert np.all(dat['b4'] == [4, 40])
dat = ascii.read(lines, fast_reader=fast_reader,
names=['b1', 'b2', 'b3', 'b4', 'b5'],
exclude_names=['b3', 'b5', 'a_3', 'a_4'])
assert len(dat) == 2
assert dat.colnames == ['b1', 'b2', 'b4']
assert np.all(dat['b1'] == [1, 10])
assert np.all(dat['b2'] == [2, 20])
assert np.all(dat['b4'] == [4, 40])
def test_include_names_rdb_fast():
"""Test that selecting column names via `include_names` works for the RDB format
with fast reader. This is testing the fix for a bug identified in #9939.
"""
lines = _get_lines(True)
lines[0] = 'a\ta_2\ta_1\ta_3\ta_4'
dat = ascii.read(lines, fast_reader='force', include_names=['a', 'a_2', 'a_3'])
assert len(dat) == 2
assert dat['a'].dtype == int
assert dat['a_2'].dtype == int
@pytest.mark.parametrize('fast_reader', [False, 'force'])
def test_deduplicate_names_with_types(fast_reader):
"""Test that on selecting column names via `include_names` in the RDB format with
different types and duplicate column names type assignment is correctly preserved.
"""
lines = _get_lines(True)
lines[1] = 'N\tN\tN\tS\tS'
dat = ascii.read(lines, fast_reader=fast_reader, include_names=['a', 'a_2', 'a_3'])
assert len(dat) == 2
assert dat['a_2'].dtype.kind == 'i'
assert dat['a_3'].dtype.kind == 'U'
dat = ascii.read(lines, fast_reader=fast_reader, names=['b1', 'b2', 'b3', 'b4', 'b5'],
include_names=['a1', 'a_2', 'b1', 'b2', 'b4'])
assert len(dat) == 2
assert dat.colnames == ['b1', 'b2', 'b4']
assert dat['b2'].dtype.kind == 'i'
assert dat['b4'].dtype.kind == 'U'
@pytest.mark.parametrize('rdb', [False, True])
@pytest.mark.parametrize('fast_reader', [False, 'force'])
def test_set_invalid_names(rdb, fast_reader):
"""Test exceptions for invalid (duplicate or `None`) names specified via argument."""
lines = _get_lines(rdb)
if rdb:
fmt = 'rdb'
else:
fmt = 'basic'
with pytest.raises(ValueError) as err:
ascii.read(lines, fast_reader=fast_reader, format=fmt, guess=rdb,
names=['b1', 'b2', 'b1', 'b4', 'b5'])
assert 'Duplicate column names' in str(err.value)
with pytest.raises(TypeError) as err:
ascii.read(lines, fast_reader=fast_reader, format=fmt, guess=rdb,
names=['b1', 'b2', 'b1', None, None])
assert 'Cannot have None for column name' in str(err.value)
def test_read_masked_bool():
txt = """\
col0 col1
1 1
0 2
True 3
"" 4
False 5
"""
# Reading without converters returns col0 as a string
dat = ascii.read(txt, format='basic')
col = dat['col0']
assert isinstance(col, MaskedColumn)
assert col.dtype.kind == 'U'
assert col[0] == "1"
# Force col0 to be read as bool
converters = {'col0': [convert_numpy(bool)]}
dat = ascii.read(txt, format='basic', converters=converters)
col = dat['col0']
assert isinstance(col, MaskedColumn)
assert col.dtype.kind == 'b'
assert np.all(col.mask == [False, False, False, True, False])
assert np.all(col == [True, False, True, False, False])
def test_read_converters_wildcard():
'''Test converters where the column name is specified with
a wildcard.
'''
converters = {'F*': [ascii.convert_numpy(np.float32)]}
t = ascii.read(['Fabc Iabc', '1 2'], converters=converters)
assert np.issubdtype(t['Fabc'].dtype, np.float32)
assert not np.issubdtype(t['Iabc'].dtype, np.float32)
def test_read_converters_simplified():
"""Test providing io.ascii read converters as type or dtypes instead of
convert_numpy(type) outputs"""
t = Table()
t['a'] = [1, 2]
t['b'] = [3.5, 4]
t['c'] = ['True', 'False']
    t['d'] = ['true', 'false']  # Looks kind of like boolean but is actually a string
t['e'] = [5, 6]
out = StringIO()
t.write(out, format='ascii.basic')
converters = {'a': str, 'e': np.float32}
t2 = Table.read(out.getvalue(), format='ascii.basic', converters=converters)
assert t2.pformat(show_dtype=True) == [
' a b c d e ',
'str1 float64 str5 str5 float32',
'---- ------- ----- ----- -------',
' 1 3.5 True true 5.0',
' 2 4.0 False false 6.0'
]
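    # A '*' key applies the list of candidate converters to every remaining
    # column; the converters are tried in order until one succeeds.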
converters = {'a': float, '*': [np.int64, float, bool, str]}
t2 = Table.read(out.getvalue(), format='ascii.basic', converters=converters)
assert t2.pformat_all(show_dtype=True) == [
' a b c d e ',
'float64 float64 bool str5 int64',
'------- ------- ----- ----- -----',
' 1.0 3.5 True true 5',
' 2.0 4.0 False false 6'
]
# Test failures
for converters in ({'*': [int, 1, bool, str]}, # bad converter type
# Tuple converter where 2nd element is not a subclass of NoType
{'a': [(int, int)]},
# Tuple converter with 3 elements not 2
{'a': [(int, int, int)]}):
with pytest.raises(ValueError, match='Error: invalid format for converters'):
t2 = Table.read(out.getvalue(), format='ascii.basic',
converters=converters, guess=False)
|
11b3f5e29a6c6c8020186ff1c53c72bffe4ffce6fd8d55599facf7c733d1c28d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import copy
from contextlib import nullcontext
from io import StringIO
from itertools import chain
import pathlib
import pytest
import numpy as np
from astropy.io import ascii
from astropy import table
from astropy.table.table_helpers import simple_table
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.compat.optional_deps import HAS_BS4
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy import units as u
from .common import setup_function, teardown_function # noqa
if HAS_BS4:
from bs4 import BeautifulSoup, FeatureNotFound # noqa
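# Each entry below pairs the ``ascii.write`` keyword arguments under test with
# the exact text expected on output.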
test_defs = [
dict(kwargs=dict(),
out="""\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(delimiter=None),
out="""\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(formats={'XCENTER': '%12.1f',
'YCENTER': '{0:.1f}'},
include_names=['XCENTER', 'YCENTER'],
strip_whitespace=False),
out="""\
XCENTER YCENTER
" 138.5" 256.4
" 18.1" 280.2
"""
),
dict(kwargs=dict(Writer=ascii.Rdb, exclude_names=['CHI']),
out="""\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tPIER\tPERROR
N\tN\tN\tN\tN\tN\tN\tN\tN\tS
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t0\tNo_error
"""
),
dict(kwargs=dict(Writer=ascii.Tab),
out="""\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tCHI\tPIER\tPERROR
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0.802\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t1.104\t0\tNo_error
"""
),
dict(kwargs=dict(Writer=ascii.Csv),
out="""\
ID,XCENTER,YCENTER,MAG,MERR,MSKY,NITER,SHARPNESS,CHI,PIER,PERROR
14,138.538,256.405,15.461,0.003,34.85955,4,-0.032,0.802,0,No_error
18,18.114,280.170,22.329,0.206,30.12784,4,-2.544,1.104,0,No_error
"""
),
dict(kwargs=dict(Writer=ascii.NoHeader),
out="""\
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.CommentedHeader),
out="""\
# ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.CommentedHeader, comment='&'),
out="""\
&ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.Latex),
out="""\
\\begin{table}
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
\\end{table}
"""
),
dict(kwargs=dict(Writer=ascii.AASTex),
out="""\
\\begin{deluxetable}{ccccccccccc}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{pixels} & \\colhead{pixels} & \\colhead{magnitudes} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable}
""" # noqa
),
dict(
kwargs=dict(Writer=ascii.AASTex, caption='Mag values \\label{tab1}', latexdict={
'units': {'MAG': '[mag]', 'XCENTER': '[pixel]'}, 'tabletype': 'deluxetable*',
'tablealign': 'htpb'}),
out="""\
\\begin{deluxetable*}{ccccccccccc}[htpb]
\\tablecaption{Mag values \\label{tab1}}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{[pixel]} & \\colhead{pixels} & \\colhead{[mag]} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable*}
""" # noqa
),
dict(
kwargs=dict(Writer=ascii.Latex, caption='Mag values \\label{tab1}',
latexdict={'preamble': '\\begin{center}', 'tablefoot': '\\end{center}',
'data_end': ['\\hline', '\\hline'],
'units':{'MAG': '[mag]', 'XCENTER': '[pixel]'},
'tabletype': 'table*',
'tablealign': 'h'},
col_align='|lcccccccccc|'),
out="""\
\\begin{table*}[h]
\\begin{center}
\\caption{Mag values \\label{tab1}}
\\begin{tabular}{|lcccccccccc|}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& [pixel] & pixels & [mag] & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\hline
\\hline
\\end{tabular}
\\end{center}
\\end{table*}
"""
),
dict(kwargs=dict(Writer=ascii.Latex, latexdict=ascii.latexdicts['template']),
out="""\
\\begin{tabletype}[tablealign]
preamble
\\caption{caption}
\\begin{tabular}{col_align}
header_start
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
header_end
data_start
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
data_end
\\end{tabular}
tablefoot
\\end{tabletype}
"""
),
dict(kwargs=dict(Writer=ascii.Latex, latexdict={'tabletype': None}),
out="""\
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
"""
),
dict(kwargs=dict(Writer=ascii.HTML, htmldict={'css': 'table,th,td{border:1px solid black;'}),
out="""\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
<style>
table,th,td{border:1px solid black; </style>
</head>
<body>
<table>
<thead>
<tr>
<th>ID</th>
<th>XCENTER</th>
<th>YCENTER</th>
<th>MAG</th>
<th>MERR</th>
<th>MSKY</th>
<th>NITER</th>
<th>SHARPNESS</th>
<th>CHI</th>
<th>PIER</th>
<th>PERROR</th>
</tr>
</thead>
<tr>
<td>14</td>
<td>138.538</td>
<td>256.405</td>
<td>15.461</td>
<td>0.003</td>
<td>34.85955</td>
<td>4</td>
<td>-0.032</td>
<td>0.802</td>
<td>0</td>
<td>No_error</td>
</tr>
<tr>
<td>18</td>
<td>18.114</td>
<td>280.170</td>
<td>22.329</td>
<td>0.206</td>
<td>30.12784</td>
<td>4</td>
<td>-2.544</td>
<td>1.104</td>
<td>0</td>
<td>No_error</td>
</tr>
</table>
</body>
</html>
"""
),
dict(kwargs=dict(Writer=ascii.Ipac),
out="""\
\\MERGERAD='INDEF'
\\IRAF='NOAO/IRAFV2.10EXPORT'
\\USER=''
\\HOST='tucana'
\\DATE='05-28-93'
\\TIME='14:46:13'
\\PACKAGE='daophot'
\\TASK='nstar'
\\IMAGE='test'
\\GRPFILE='test.psg.1'
\\PSFIMAGE='test.psf.1'
\\NSTARFILE='test.nst.1'
\\REJFILE='"hello world"'
\\SCALE='1.'
\\DATAMIN='50.'
\\DATAMAX='24500.'
\\GAIN='1.'
\\READNOISE='0.'
\\OTIME='00:07:59.0'
\\XAIRMASS='1.238106'
\\IFILTER='V'
\\RECENTER='yes'
\\FITSKY='no'
\\PSFMAG='16.594'
\\PSFRAD='5.'
\\FITRAD='3.'
\\MAXITER='50'
\\MAXGROUP='60'
\\FLATERROR='0.75'
\\PROFERROR='5.'
\\CLIPEXP='6'
\\CLIPRANGE='2.5'
| ID| XCENTER| YCENTER| MAG| MERR| MSKY| NITER| SHARPNESS| CHI| PIER| PERROR|
| long| double| double| double| double| double| long| double| double| long| char|
| | pixels| pixels| magnitudes| magnitudes| counts| | | | | perrors|
| null| null| null| null| null| null| null| null| null| null| null|
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""" # noqa
),
]
test_defs_no_data = [
dict(kwargs=dict(Writer=ascii.Ipac),
out="""\
\\ This is an example of a valid comment.
\\ The 2nd data line is used to verify the exact column parsing
\\ (unclear if this is a valid for the IPAC format)
\\catalog='sao'
\\date='Wed Sp 20 09:48:36 1995'
\\mykeyword='Another way for defining keyvalue string'
| ra| dec| sai| v2|sptype|
|double|double|long|double| char|
| unit| unit|unit| unit| ergs|
| null| null|null| null| null|
"""
),
]
tab_to_fill = ['a b c', '1 2 3', '1 1 3']
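# Writer test cases for the fill_values machinery, applied to the small
# tab_to_fill table above.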
test_defs_fill_value = [
dict(kwargs=dict(),
out="""\
a b c
1 2 3
1 1 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w')),
out="""\
a b c
w 2 3
w w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w', 'b')),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w'),
fill_include_names=['b']),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w'),
fill_exclude_names=['a']),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w'),
fill_include_names=['a'],
fill_exclude_names=['a', 'b']),
out="""\
a b c
1 2 3
1 1 3
"""
),
dict(kwargs=dict(fill_values=[('1', 'w')],
formats={'a': '%4.2f'}),
out="""\
a b c
1.00 2 3
1.00 w 3
"""
),
]
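# Writer test cases for masked tables, exercising the special ascii.masked
# sentinel in fill_values.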
test_def_masked_fill_value = [
dict(kwargs=dict(),
out="""\
a b c
"" 2 3
1 1 ""
"""
),
dict(kwargs=dict(fill_values=[('1', 'w'), (ascii.masked, 'X')]),
out="""\
a b c
X 2 3
w w X
"""
),
dict(kwargs=dict(fill_values=[('1', 'w'), (ascii.masked, 'XXX')],
formats={'a': '%4.1f'}),
out="""\
a b c
XXX 2 3
1.0 w XXX
"""
),
dict(kwargs=dict(Writer=ascii.Csv),
out="""\
a,b,c
,2,3
1,1,
"""
),
]
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmpdir):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv('HOME', str(tmpdir))
# For Windows
monkeypatch.setenv('USERPROFILE', str(tmpdir))
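# Helper functions that write ``table`` with a single test definition and
# compare the whitespace-normalized output against the expected text.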
def check_write_table(test_def, table, fast_writer, out=None):
if out is None:
out = StringIO()
try:
ascii.write(table, out, fast_writer=fast_writer, **test_def['kwargs'])
except ValueError as e: # if format doesn't have a fast writer, ignore
        if 'not in the list of formats with fast writers' not in str(e):
raise e
return
if isinstance(out, StringIO):
# Output went to a buffer
actual = out.getvalue()
else:
# Output went to a file
if str(out).startswith('~'):
# Ensure a file hasn't been accidentally written to a literal tilde
# path
assert not os.path.exists(out)
out = os.path.expanduser(out)
assert os.path.exists(out)
with open(out) as f:
actual = f.read()
os.remove(out)
print(f"Expected:\n{test_def['out']}")
print(f'Actual:\n{actual}')
assert [x.strip() for x in actual.strip().splitlines()] == [
x.strip() for x in test_def['out'].strip().splitlines()]
def check_write_table_via_table(test_def, table, fast_writer, out=None):
if out is None:
out = StringIO()
test_def = copy.deepcopy(test_def)
if 'Writer' in test_def['kwargs']:
format = f"ascii.{test_def['kwargs']['Writer']._format_name}"
del test_def['kwargs']['Writer']
else:
format = 'ascii'
try:
table.write(out, format=format, fast_writer=fast_writer, **test_def['kwargs'])
except ValueError as e: # if format doesn't have a fast writer, ignore
        if 'not in the list of formats with fast writers' not in str(e):
raise e
return
if isinstance(out, StringIO):
# Output went to a buffer
actual = out.getvalue()
else:
# Output went to a file
if str(out).startswith('~'):
# Ensure a file hasn't been accidentally written to a literal tilde
# path
assert not os.path.exists(out)
out = os.path.expanduser(out)
assert os.path.exists(out)
with open(out) as f:
actual = f.read()
os.remove(out)
print(f"Expected:\n{test_def['out']}")
print(f'Actual:\n{actual}')
assert [x.strip() for x in actual.strip().splitlines()] == [
x.strip() for x in test_def['out'].strip().splitlines()]
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize('path_format',
['buffer', 'plain', 'tilde-str', 'tilde-pathlib'])
def test_write_table(
fast_writer, tmpdir, home_is_tmpdir, path_format):
table = ascii.get_reader(Reader=ascii.Daophot)
data = table.read('data/daophot.dat')
if path_format == 'buffer':
out_name = None
elif path_format == 'plain':
out_name = os.path.join(tmpdir, 'table')
elif path_format == 'tilde-str':
out_name = os.path.join('~', 'table')
else:
out_name = pathlib.Path('~', 'table')
for test_def in test_defs:
check_write_table(test_def, data, fast_writer, out=out_name)
check_write_table_via_table(
test_def, data, fast_writer, out=out_name)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_values(fast_writer):
data = ascii.read(tab_to_fill)
for test_def in test_defs_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_masked_different(fast_writer):
'''see discussion in #2255'''
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data['a'].mask = [True, False]
data['c'].mask = [False, True]
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_no_data_ipac(fast_writer):
"""Write an IPAC table that contains no data."""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
for test_def in test_defs_no_data:
check_write_table(test_def, data, fast_writer)
check_write_table_via_table(test_def, data, fast_writer)
def test_write_invalid_toplevel_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored in the top-level metadata and therefore should
raise a warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
data.meta['blah'] = 'extra'
out = StringIO()
with pytest.warns(AstropyWarning, match=r'.*were not written.*') as warn:
data.write(out, format='ascii.ipac')
assert len(warn) == 1
def test_write_invalid_keyword_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored appropriately in the ``keywords`` section
of the metadata but with invalid format and therefore should raise a
warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
data.meta['keywords']['blah'] = 'invalid'
out = StringIO()
with pytest.warns(AstropyWarning, match=r'.*has been skipped.*') as warn:
data.write(out, format='ascii.ipac')
assert len(warn) == 1
def test_write_valid_meta_ipac():
"""Write an IPAC table that contains no data and has *correctly* specified
metadata. No warnings should be issued"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
data.meta['keywords']['blah'] = {'value': 'invalid'}
out = StringIO()
data.write(out, format='ascii.ipac')
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_comments(fast_writer):
"""Write comments in output originally read by io.ascii."""
data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1,2,3')
out = StringIO()
ascii.write(data, out, format='basic', fast_writer=fast_writer)
expected = ['# c1', '# c2', '# c3', 'a b c', '1 2 3']
assert out.getvalue().splitlines() == expected
# header comes before comments for commented-header
out = StringIO()
ascii.write(data, out, format='commented_header', fast_writer=fast_writer)
expected = ['# a b c', '# c1', '# c2', '# c3', '1 2 3']
assert out.getvalue().splitlines() == expected
# setting comment=False should disable comment writing
out = StringIO()
ascii.write(data, out, format='basic', comment=False, fast_writer=fast_writer)
expected = ['a b c', '1 2 3']
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize("fmt", ['%0.1f', '.1f', '0.1f', '{0:0.1f}'])
def test_write_format(fast_writer, fmt):
"""Check different formats for a column."""
data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1.11,2.22,3.33')
out = StringIO()
expected = ['# c1', '# c2', '# c3', 'a b c', '1.1 2.22 3.33']
data['a'].format = fmt
ascii.write(data, out, format='basic', fast_writer=fast_writer)
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
def test_strip_names(fast_writer):
"""Names should be stripped of whitespace by default."""
data = table.Table([[1], [2], [3]], names=(' A', 'B ', ' C '))
out = StringIO()
ascii.write(data, out, format='csv', fast_writer=fast_writer)
assert out.getvalue().splitlines()[0] == 'A,B,C'
def test_latex_units():
"""
Check to make sure that Latex and AASTex writers attempt to fall
back on the **unit** attribute of **Column** if the supplied
**latexdict** does not specify units.
"""
t = table.Table([table.Column(name='date', data=['a', 'b']),
table.Column(name='NUV exp.time', data=[1, 2])])
latexdict = copy.deepcopy(ascii.latexdicts['AA'])
latexdict['units'] = {'NUV exp.time': 's'}
out = StringIO()
expected = '''\
\\begin{table}{cc}
\\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}}
\\startdata
a & 1 \\\\
b & 2
\\enddata
\\end{table}
'''.replace('\n', os.linesep)
ascii.write(t, out, format='aastex', latexdict=latexdict)
assert out.getvalue() == expected
# use unit attribute instead
t['NUV exp.time'].unit = u.s
t['date'].unit = u.yr
out = StringIO()
ascii.write(t, out, format='aastex', latexdict=ascii.latexdicts['AA'])
assert out.getvalue() == expected.replace(
'colhead{s}', r'colhead{$\mathrm{s}$}').replace(
'colhead{ }', r'colhead{$\mathrm{yr}$}')
@pytest.mark.parametrize("fast_writer", [True, False])
def test_commented_header_comments(fast_writer):
"""
    Test the fix for #3562, where using comment=False with the
    commented_header writer raised a confusing exception.
"""
t = table.Table([[1, 2]])
with pytest.raises(ValueError) as err:
out = StringIO()
ascii.write(t, out, format='commented_header', comment=False,
fast_writer=fast_writer)
assert "for the commented_header writer you must supply a string" in str(err.value)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_byte_string_output(fast_writer):
"""
Test the fix for #4350 where byte strings were output with a
leading `b` on Py3.
"""
t = table.Table([['Hello', 'World']], dtype=['S10'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0', 'Hello', 'World']
@pytest.mark.parametrize('names, include_names, exclude_names, formats, issues_warning', [
(['x', 'y'], ['x', 'y'], ['x'], {'x': '%d', 'y': '%f'}, True),
(['x', 'y'], ['x', 'y'], ['y'], {'x': '%d'}, False),
(['x', 'y'], ['x', 'y'], [], {'p': '%d', 'q': '%f'}, True),
(['x', 'y'], ['x', 'y'], [], {'z': '%f'}, True),
(['x', 'y'], ['x', 'y'], [], {'x': '%d'}, False),
(['x', 'y'], ['x', 'y'], [], {'p': '%d', 'y': '%f'}, True),
(['x', 'y'], ['x', 'y'], [], {}, False)
])
def test_names_with_formats(names, include_names, exclude_names, formats, issues_warning):
"""Test for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(t, out, names=names, include_names=include_names,
exclude_names=exclude_names, formats=formats)
if issues_warning:
assert len(warn) == 1
@pytest.mark.parametrize('formats, issues_warning', [
({'p': '%d', 'y': '%f'}, True),
({'x': '%d', 'y': '%f'}, True),
({'z': '%f'}, True),
({}, False)
])
def test_columns_names_with_formats(formats, issues_warning):
"""Test the fix for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(t, out, formats=formats)
if issues_warning:
assert len(warn) == 1
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_quoted_empty_field(fast_writer):
"""
    Test that empty string fields are written as quoted empty strings ("") with
    the default space delimiter, and as empty fields with a comma delimiter.
"""
t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0 col1', 'Hello ""', '"" ""']
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer, delimiter=',')
assert out.getvalue().splitlines() == ['col0,col1', 'Hello,', ',']
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_empty_table(fast_writer):
"""Test writing empty table #8275."""
t = table.Table([[]], dtype=['S2'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0']
@pytest.mark.parametrize("format", ['ascii', 'csv', 'html', 'latex',
'ascii.fixed_width', 'html'])
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize('path_format', ['plain', 'tilde-str', 'tilde-pathlib'])
def test_write_overwrite_ascii(format, fast_writer, tmpdir, home_is_tmpdir,
path_format):
"""Test overwrite argument for various ASCII writers"""
true_filename = tmpdir.join("table-tmp.dat").strpath
if path_format == 'plain':
filename = true_filename
elif path_format == 'tilde-str':
filename = os.path.join('~', 'table-tmp.dat')
else:
filename = pathlib.Path('~', 'table-tmp.dat')
with open(true_filename, 'w'):
# create empty file
pass
t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10'])
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format=format, fast_writer=fast_writer)
t.write(filename, overwrite=True, format=format,
fast_writer=fast_writer)
# If the output is a file object, overwrite is ignored
with open(true_filename, 'w') as fp:
t.write(fp, overwrite=False, format=format,
fast_writer=fast_writer)
t.write(fp, overwrite=True, format=format,
fast_writer=fast_writer)
if 'tilde' in path_format:
# Ensure no files have been accidentally written to a literal tilde path
assert not os.path.exists(filename)
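# (format name, format class) pairs for every registered ascii format,
# including the fast variants; used to parametrize the tests below.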
fmt_name_classes = list(chain(ascii.core.FAST_CLASSES.items(),
ascii.core.FORMAT_CLASSES.items()))
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_roundtrip_masked(fmt_name_class):
"""
Round trip a simple masked table through every writable format and confirm
that reading back gives the same result.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, '_io_registry_can_write', True):
return
# Skip tests for fixed_width or HTML without bs4
if ((fmt_name == 'html' and not HAS_BS4)
or fmt_name == 'fixed_width'):
return
if 'qdp' in fmt_name:
# QDP tables are for numeric values only
t = simple_table(masked=True, kinds=['f', 'i'])
else:
t = simple_table(masked=True)
out = StringIO()
fast = fmt_name in ascii.core.FAST_CLASSES
try:
ascii.write(t, out, format=fmt_name, fast_writer=fast)
except ImportError: # Some failed dependency, skip test
return
# No-header formats need to be told the column names
kwargs = {'names': t.colnames} if 'no_header' in fmt_name else {}
if 'qdp' in fmt_name:
kwargs.update({'table_id': 0, 'names': t.colnames})
t2 = ascii.read(out.getvalue(), format=fmt_name, fast_reader=fast, guess=False, **kwargs)
assert t.colnames == t2.colnames
for col, col2 in zip(t.itercols(), t2.itercols()):
assert col.dtype.kind == col2.dtype.kind
assert np.all(col == col2)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_newlines(fast_writer, tmpdir):
# Regression test for https://github.com/astropy/astropy/issues/5126
# On windows, when writing to a filename (not e.g. StringIO), newlines were
# \r\r\n instead of \r\n.
filename = tmpdir.join('test').strpath
t = table.Table([['a', 'b', 'c']], names=['col'])
ascii.write(t, filename, fast_writer=fast_writer)
with open(filename, 'r', newline='') as f:
content = f.read()
assert content == os.linesep.join(['col', 'a', 'b', 'c']) + os.linesep
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_csv_with_comments(fast_writer):
"""
Test fix for #7357 where writing a Table with comments to 'csv' fails with
a cryptic message. The comments are dropped by default, but when comment='#'
is supplied they are still written.
"""
out = StringIO()
t = table.Table([[1, 2], [3, 4]], names=['a', 'b'])
t.meta['comments'] = ['hello']
ascii.write(t, out, format='csv', fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['a,b', '1,3', '2,4']
out = StringIO()
ascii.write(t, out, format='csv', fast_writer=fast_writer, comment='#')
assert out.getvalue().splitlines() == ['#hello', 'a,b', '1,3', '2,4']
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_formatted_mixin(fast_writer):
"""
Test fix for #8680 where writing a QTable with a quantity mixin generates
an exception if a format is specified.
"""
out = StringIO()
t = table.QTable([[1, 2], [1, 2] * u.m], names=['a', 'b'])
ascii.write(t, out, fast_writer=fast_writer, formats={'a': '%02d', 'b': '%.2f'})
assert out.getvalue().splitlines() == ['a b',
'01 1.00',
'02 2.00']
def test_validate_write_kwargs():
out = StringIO()
t = table.QTable([[1, 2], [1, 2]], names=['a', 'b'])
with pytest.raises(TypeError, match=r"write\(\) argument 'fast_writer' must be a "
r"\(<class 'bool'>, <class 'str'>\) object, "
r"got <class 'int'> instead"):
ascii.write(t, out, fast_writer=12)
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_multidim_column_error(fmt_name_class):
"""
Test that trying to write a multidim column fails in every format except
ECSV.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, '_io_registry_can_write', True):
return
    # Skip ecsv, latex, aastex, and HTML without bs4. For latex and aastex see
    # the comment in the latex.py Latex class where max_ndim = None is defined.
if ((fmt_name == 'html' and not HAS_BS4)
or fmt_name in ('ecsv', 'latex', 'aastex')):
return
out = StringIO()
t = table.Table()
t['a'] = np.arange(16).reshape(2, 2, 2, 2)
t['b'] = [1, 2]
fast = fmt_name in ascii.core.FAST_CLASSES
with pytest.raises(ValueError, match=r'column\(s\) with dimension'):
ascii.write(t, out, format=fmt_name, fast_writer=fast)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_as_columns(fast_writer):
"""
Test that writing a set of columns also roundtrips (as long as the
table does not have metadata, etc.)
"""
# Use masked in case that makes it more difficult.
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data['a'].mask = [True, False]
data['c'].mask = [False, True]
data = list(data.columns.values())
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
|
7999d27da4d811fe76c174b1092b556dacd15431ae7c1f769223603bfbcb614f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
from astropy.table import Table, QTable, Column
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units.quantity import QuantityInfo
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy.io.misc.hdf5 import meta_path
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.compat.optional_deps import HAS_H5PY # noqa
if HAS_H5PY:
import h5py
from astropy.io.tests.mixin_columns import mixin_cols, compare_attrs, serialized_names
# HDF5 does not support object dtype (since it stores binary representations).
unsupported_cols = {name: col for name, col in mixin_cols.items()
if (isinstance(col, np.ndarray) and col.dtype.kind == 'O')}
mixin_cols = {name: col for name, col in mixin_cols.items()
if name not in unsupported_cols}
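# Plain numpy dtypes that should round-trip exactly through HDF5.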
ALL_DTYPES = [np.uint8, np.uint16, np.uint32, np.uint64, np.int8,
np.int16, np.int32, np.int64, np.float32, np.float64,
np.bool_, '|S3']
def _default_values(dtype):
if dtype == np.bool_:
return [0, 1, 1]
elif dtype == '|S3':
return [b'abc', b'def', b'ghi']
else:
return [1, 2, 3]
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmpdir):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv('HOME', str(tmpdir))
# For Windows
monkeypatch.setenv('USERPROFILE', str(tmpdir))
@pytest.mark.skipif('not HAS_H5PY')
def test_write_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.warns(UserWarning, match="table path was not set via the path= argument"):
t1.write(test_file)
t1 = Table.read(test_file, path='__astropy_table__')
@pytest.mark.skipif('not HAS_H5PY')
def test_write_nopath_nonempty(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='bubu')
with pytest.raises(ValueError) as exc:
t1.write(test_file, append=True)
assert 'table path should always be set via the path=' in exc.value.args[0]
@pytest.mark.skipif('not HAS_H5PY')
def test_read_notable_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
with pytest.raises(ValueError, match='no table found in HDF5 group /'):
Table.read(test_file, path='/', format='hdf5')
@pytest.mark.skipif('not HAS_H5PY')
def test_read_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path="the_table")
t2 = Table.read(test_file)
assert np.all(t1['a'] == t2['a'])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_nopath_multi_tables(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path="the_table")
t1.write(test_file, path="the_table_but_different", append=True,
overwrite=True)
with pytest.warns(AstropyUserWarning,
match=r"path= was not specified but multiple tables"):
t2 = Table.read(test_file)
assert np.all(t1['a'] == t2['a'])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_invalid_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(ValueError) as exc:
t1.write(test_file, path='test/')
assert exc.value.args[0] == "table path should end with table name, not /"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_invalid_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/')
assert exc.value.args[0] == "Path test/ does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_table(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
f.create_group('test').create_group('path')
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
with pytest.raises(OSError) as exc:
Table.read(f, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_simple(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_table(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
with pytest.raises(OSError) as exc:
t1.write(test_file, path='the_table', append=True)
assert exc.value.args[0] == "Table the_table already exists"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_memory(tmpdir):
with h5py.File('test', 'w', driver='core', backing_store=False) as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
t2 = Table.read(output_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t1.write(test_file, path='the_table')
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_overwrite(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table', overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table_1', append=True)
t1.write(test_file, path='the_table_2', append=True)
t2 = Table.read(test_file, path='the_table_1')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(test_file, path='the_table_2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_groups(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
f.create_group('test_1')
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='test_1/the_table_1', append=True)
t1.write(test_file, path='test_2/the_table_2', append=True)
t2 = Table.read(test_file, path='test_1/the_table_1')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(test_file, path='test_2/the_table_2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_overwrite(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='table1')
t1.write(test_file, path='table2', append=True)
t1v2 = Table()
t1v2.add_column(Column(name='a', data=[4, 5, 6]))
with pytest.raises(OSError) as exc:
t1v2.write(test_file, path='table1', append=True)
assert exc.value.args[0] == 'Table table1 already exists'
t1v2.write(test_file, path='table1', append=True, overwrite=True)
t2 = Table.read(test_file, path='table1')
assert np.all(t2['a'] == [4, 5, 6])
t3 = Table.read(test_file, path='table2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_filobj_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='path/to/data/the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file, path='path/to/data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_filobj_group_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='path/to/data/the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file['path/to'], path='data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_wrong_fileobj():
class FakeFile:
def read(self):
pass
f = FakeFile()
with pytest.raises(TypeError, match='h5py can only open regular files'):
Table.read(f, format='hdf5')
@pytest.mark.skipif('not HAS_H5PY')
def test_write_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_create_dataset_kwargs(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
the_path = 'the_table'
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path=the_path,
maxshape=(None, ))
# A roundabout way of checking this, but the table created above should be
# resizable if the kwarg was passed through successfully
t2 = Table()
t2.add_column(Column(name='a', data=[4, 5]))
with h5py.File(test_file, 'a') as output_file:
output_file[the_path].resize((len(t1) + len(t2), ))
output_file[the_path][len(t1):] = t2.as_array()
t3 = Table.read(test_file, path='the_table')
assert np.all(t3['a'] == [1, 2, 3, 4, 5])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_filobj_group(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='path/to/data/the_table')
t2 = Table.read(test_file, path='path/to/data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_wrong_type():
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(TypeError) as exc:
t1.write(1212, path='path/to/data/the_table', format='hdf5')
assert exc.value.args[0] == ('output should be a string '
'or an h5py File or Group object')
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize(('dtype'), ALL_DTYPES)
def test_preserve_single_dtypes(tmpdir, dtype):
test_file = str(tmpdir.join('test.hdf5'))
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name='a', data=np.array(values, dtype=dtype)))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == values)
assert t2['a'].dtype == dtype
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_all_dtypes(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_meta(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['a'] = 1
t1.meta['b'] = 'hello'
t1.meta['c'] = 3.14159
t1.meta['d'] = True
t1.meta['e'] = np.array([1, 2, 3])
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
for key in t1.meta:
assert np.all(t1.meta[key] == t2.meta[key])
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
# Check that the meta table is fixed-width bytes (see #11299)
h5 = h5py.File(test_file, 'r')
meta_lines = h5[meta_path('the_table')]
assert meta_lines.dtype.kind == 'S'
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized_old_meta_format(tmpdir):
"""Test the old meta format
Only for some files created prior to v4.0, in compatibility mode.
"""
test_file = get_pkg_data_filename('data/old_meta_example.hdf5')
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized_in_complicated_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.write(test_file, path='the_table/complicated/path', serialize_meta=True,
overwrite=True)
t2 = Table.read(test_file, path='the_table/complicated/path')
assert t1['a'].format == t2['a'].format
assert t1['a'].unit == t2['a'].unit
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_metadata_very_large(tmpdir):
"""Test that very large datasets work, now!"""
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.meta["meta_big"] = "0" * (2 ** 16 + 1)
t1.meta["meta_biggerstill"] = "0" * (2 ** 18)
t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_skip_meta(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['a'] = 1
t1.meta['b'] = 'hello'
t1.meta['c'] = 3.14159
t1.meta['d'] = True
t1.meta['e'] = np.array([1, 2, 3])
t1.meta['f'] = str
wtext = f"Attribute `f` of type {type(t1.meta['f'])} cannot be written to HDF5 files - skipping"
with pytest.warns(AstropyUserWarning, match=wtext) as w:
t1.write(test_file, path='the_table')
assert len(w) == 1
@pytest.mark.skipif('not HAS_H5PY')
def test_fail_meta_serialize(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['f'] = str
with pytest.raises(Exception) as err:
t1.write(test_file, path='the_table', serialize_meta=True)
assert "cannot represent an object" in str(err.value)
assert "<class 'str'>" in str(err.value)
@pytest.mark.skipif('not HAS_H5PY')
def test_read_h5py_objects(tmpdir):
# Regression test - ensure that Datasets are recognized automatically
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
f = h5py.File(test_file, mode='r')
t2 = Table.read(f, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(f['/'], path='the_table')
assert np.all(t3['a'] == [1, 2, 3])
t4 = Table.read(f['the_table'])
assert np.all(t4['a'] == [1, 2, 3])
f.close() # don't raise an error in 'test --open-files'
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_unicode_to_hdf5(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t = Table()
t['p'] = ['a', 'b', 'c']
t['q'] = [1, 2, 3]
t['r'] = [b'a', b'b', b'c']
t['s'] = ["\u2119", "\u01b4", "\u2602"]
t.write(test_file, path='the_table', overwrite=True)
t1 = Table.read(test_file, path='the_table', character_as_bytes=False)
for col, col1 in zip(t.itercols(), t1.itercols()):
assert np.all(col == col1)
assert np.all(t1['p'].info.dtype.kind == "U")
assert np.all(t1['q'].info.dtype.kind == "i")
assert np.all(t1['r'].info.dtype.kind == "U")
assert np.all(t1['s'].info.dtype.kind == "U")
# Test default (character_as_bytes=True)
t2 = Table.read(test_file, path='the_table')
for col, col1 in zip(t.itercols(), t2.itercols()):
assert np.all(col == col1)
assert np.all(t2['p'].info.dtype.kind == "S")
assert np.all(t2['q'].info.dtype.kind == "i")
assert np.all(t2['r'].info.dtype.kind == "S")
assert np.all(t2['s'].info.dtype.kind == "S")
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta',
'info.dtype']
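    # Walk dotted attribute paths (e.g. 'info.meta'), falling back to item
    # access when attribute lookup fails.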
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
        # Mixin info.meta can be None instead of an empty OrderedDict(); #6720
        # would fix this.
if attr == 'info.meta':
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-15)
elif isinstance(a1, np.dtype):
# HDF5 does not perfectly preserve dtype: byte order can change, and
# unicode gets stored as bytes. So, we just check safe casting, to
# ensure we do not, e.g., accidentally change integer to float, etc.
if NUMPY_LT_1_22 and a1.names:
# For old numpy, can_cast does not deal well with structured dtype.
assert a1.names == a2.names
else:
assert np.can_cast(a2, a1, casting='safe')
else:
assert np.all(a1 == a2)
@pytest.mark.skipif('not HAS_H5PY')
def test_hdf5_mixins_qtable_to_table(tmpdir):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = str(tmpdir.join('test_simple.hdf5'))
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(filename, format='hdf5', path='root')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_as_one(table_cls, tmpdir):
"""Test write/read all cols at once and validate intermediate column names"""
filename = str(tmpdir.join('test_simple.hdf5'))
names = sorted(mixin_cols)
all_serialized_names = []
for name in names:
all_serialized_names.extend(serialized_names[name])
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="hdf5", path='root', serialize_meta=True)
t2 = table_cls.read(filename, format='hdf5', path='root')
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['history'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
# Read directly via hdf5 and confirm column names
h5 = h5py.File(filename, 'r')
h5_names = list(h5['root'].dtype.names)
assert h5_names == all_serialized_names
h5.close()
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_per_column(table_cls, name_col, tmpdir):
"""Test write/read one col at a time and do detailed validation"""
filename = str(tmpdir.join('test_simple.hdf5'))
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'my description'
t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
t.write(filename, format="hdf5", path='root', serialize_meta=True)
t2 = table_cls.read(filename, format='hdf5', path='root')
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ['data'] if colname in ('c1', 'c2') else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.parametrize('name_col', unsupported_cols.items())
@pytest.mark.xfail(reason='column type unsupported')
def test_hdf5_unsupported_mixin(name_col, tmpdir):
    # Check that writing the unsupported mixin columns defined above actually
    # fails.
    filename = str(tmpdir.join('test_simple.hdf5'))
name, col = name_col
Table([col], names=[name]).write(filename, format='hdf5', path='root',
serialize_meta=True)
@pytest.mark.skipif('not HAS_H5PY')
def test_round_trip_masked_table_default(tmpdir):
"""Test round-trip of MaskedColumn through HDF5 using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = str(tmpdir.join('test.h5'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t['c'] = [b'c', b'd', b'e']
t['c'].mask[1] = True
t.write(filename, format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.skipif('not HAS_H5PY')
def test_overwrite_serialized_meta():
# This used to cause an error because the meta data table
# was not removed from the existing file.
with h5py.File('test_data.h5', 'w', driver='core', backing_store=False) as out:
t1 = Table()
t1.add_column(Column(data=[4, 8, 15], unit='cm'))
t1.write(out, path='data', serialize_meta=True)
t2 = Table.read(out, path='data')
assert all(t1 == t2)
assert t1.info(out=None) == t2.info(out=None)
t3 = Table()
t3.add_column(Column(data=[16, 23, 42], unit='g'))
t3.write(out, path='data', serialize_meta=True, append=True, overwrite=True)
t2 = Table.read(out, path='data')
assert all(t3 == t2)
assert t3.info(out=None) == t2.info(out=None)
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_tilde_path(tmpdir, home_is_tmpdir):
test_file = os.path.join('~', 'test.hdf5')
t1 = Table()
t1['a'] = [1, 2, 3]
t1.write(test_file, path='the_table')
t1 = Table.read(test_file, path='the_table')
t1 = Table.read(test_file, path='the_table', format='hdf5')
# Ensure the data wasn't written to the literal tilde-prefixed path
assert not os.path.exists(test_file)
|
f93a3143abf4c4aff8ce6ac615037aba762a24fd94f378fc72450309395b777e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import warnings
from packaging.version import Version
import numpy as np
from asdf import util
from asdf.tests import helpers
from asdf import AsdfFile
import asdf
import astropy.units as u
from astropy.modeling.core import fix_inputs
from astropy.modeling import models as astmodels
from astropy.utils.compat.optional_deps import HAS_SCIPY
def custom_and_analytical_inverse():
p1 = astmodels.Polynomial1D(1)
p2 = astmodels.Polynomial1D(1)
p3 = astmodels.Polynomial1D(1)
p4 = astmodels.Polynomial1D(1)
m1 = p1 & p2
m2 = p3 & p4
m1.inverse = m2
return m1
def custom_inputs_outputs():
m = astmodels.Gaussian2D()
m.inputs = ('a', 'b')
m.outputs = ('c',)
return m
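# Representative models from astropy.modeling that should round-trip through ASDF.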
test_models = [
astmodels.Identity(2), astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Shift(2.),
astmodels.Hermite1D(2, c0=2, c1=3, c2=0.5),
astmodels.Legendre1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Legendre2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Hermite2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Scale(3.4), astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.Multiply(3), astmodels.Multiply(10*u.m),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order='xzx'),
astmodels.Mapping((0, 1), n_inputs=3),
astmodels.Shift(2.*u.deg),
astmodels.Scale(3.4*u.deg),
astmodels.RotateNative2Celestial(5.63*u.deg, -72.5*u.deg, 180*u.deg),
astmodels.RotateCelestial2Native(5.63*u.deg, -72.5*u.deg, 180*u.deg),
astmodels.RotationSequence3D([1.2, 2.3, 3.4, .3], 'xyzx'),
astmodels.SphericalRotationSequence([1.2, 2.3, 3.4, .3], 'xyzy'),
astmodels.AiryDisk2D(amplitude=10., x_0=0.5, y_0=1.5),
astmodels.Box1D(amplitude=10., x_0=0.5, width=5.),
astmodels.Box2D(amplitude=10., x_0=0.5, x_width=5., y_0=1.5, y_width=7.),
astmodels.Const1D(amplitude=5.),
astmodels.Const2D(amplitude=5.),
astmodels.Disk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5.),
astmodels.Ellipse2D(amplitude=10., x_0=0.5, y_0=1.5, a=2., b=4., theta=0.1),
astmodels.Exponential1D(amplitude=10., tau=3.5),
astmodels.Gaussian1D(amplitude=10., mean=5., stddev=3.),
astmodels.Gaussian2D(amplitude=10., x_mean=5., y_mean=5., x_stddev=3., y_stddev=3.),
astmodels.KingProjectedAnalytic1D(amplitude=10., r_core=5., r_tide=2.),
astmodels.Logarithmic1D(amplitude=10., tau=3.5),
astmodels.Lorentz1D(amplitude=10., x_0=0.5, fwhm=2.5),
astmodels.Moffat1D(amplitude=10., x_0=0.5, gamma=1.2, alpha=2.5),
astmodels.Moffat2D(amplitude=10., x_0=0.5, y_0=1.5, gamma=1.2, alpha=2.5),
astmodels.Planar2D(slope_x=0.5, slope_y=1.2, intercept=2.5),
astmodels.RedshiftScaleFactor(z=2.5),
astmodels.RickerWavelet1D(amplitude=10., x_0=0.5, sigma=1.2),
astmodels.RickerWavelet2D(amplitude=10., x_0=0.5, y_0=1.5, sigma=1.2),
astmodels.Ring2D(amplitude=10., x_0=0.5, y_0=1.5, r_in=5., width=10.),
astmodels.Sersic1D(amplitude=10., r_eff=1., n=4.),
astmodels.Sersic2D(amplitude=10., r_eff=1., n=4., x_0=0.5, y_0=1.5, ellip=0.0, theta=0.0),
astmodels.Sine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.Cosine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.Tangent1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.ArcSine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.ArcCosine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.ArcTangent1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.Trapezoid1D(amplitude=10., x_0=0.5, width=5., slope=1.),
astmodels.TrapezoidDisk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5., slope=1.),
astmodels.Voigt1D(x_0=0.55, amplitude_L=10., fwhm_L=0.5, fwhm_G=0.9),
astmodels.BlackBody(scale=10.0, temperature=6000.*u.K),
astmodels.Drude1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
astmodels.Plummer1D(mass=10.0, r_plum=5.0),
astmodels.BrokenPowerLaw1D(amplitude=10, x_break=0.5, alpha_1=2.0, alpha_2=3.5),
astmodels.ExponentialCutoffPowerLaw1D(10, 0.5, 2.0, 7.),
astmodels.LogParabola1D(amplitude=10, x_0=0.5, alpha=2., beta=3.,),
astmodels.PowerLaw1D(amplitude=10., x_0=0.5, alpha=2.0),
astmodels.SmoothlyBrokenPowerLaw1D(amplitude=10., x_break=5.0, alpha_1=2.0, alpha_2=3.0, delta=0.5),
custom_and_analytical_inverse(),
custom_inputs_outputs(),
]
if HAS_SCIPY:
test_models.append(astmodels.Spline1D(np.array([-3., -3., -3., -3., -1., 0., 1., 3., 3., 3., 3.]),
np.array([0.10412331, 0.07013616, -0.18799552, 1.35953147, -0.15282581, 0.03923, -0.04297299, 0., 0., 0., 0.]),
3))
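# Also round-trip every model class exposed in astmodels.math.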
math_models = []
for kl in astmodels.math.__all__:
klass = getattr(astmodels.math, kl)
math_models.append(klass())
test_models.extend(math_models)
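# Models with parameter constraints (fixed, bounds), to check that constraints
# survive serialization.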
test_models_with_constraints = [astmodels.Legendre2D(x_degree=1, y_degree=1,
c0_0=1, c0_1=2, c1_0=3,
fixed={'c1_0': True, 'c0_1': True},
bounds={'c0_0': (-10, 10)})]
test_models.extend(test_models_with_constraints)
def test_transforms_compound(tmpdir):
tree = {
'compound':
astmodels.Shift(1) & astmodels.Shift(2) |
astmodels.Sky2Pix_TAN() |
astmodels.Rotation2D() |
astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32]) +
astmodels.Rotation2D(32)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_inverse_transforms(tmpdir):
rotation = astmodels.Rotation2D(32)
rotation.inverse = astmodels.Rotation2D(45)
real_rotation = astmodels.Rotation2D(32)
tree = {
'rotation': rotation,
'real_rotation': real_rotation
}
def check(ff):
assert ff.tree['rotation'].inverse.angle == 45
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
@pytest.mark.parametrize(('model'), test_models)
def test_single_model(tmpdir, model):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.6.0 which causes warnings
if Version(asdf.__version__) <= Version('2.6.0'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
tree = {'single_model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
def check(ff):
assert ff.tree['rot'].name == 'foo'
tree = {'rot': astmodels.Rotation2D(23, name='foo')}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_zenithal_with_arguments(tmpdir):
tree = {
'azp': astmodels.Sky2Pix_AZP(0.5, 0.3)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_naming_of_compound_model(tmpdir):
"""Issue #87"""
def asdf_check(ff):
assert ff.tree['model'].name == 'compound_model'
offx = astmodels.Shift(1)
scl = astmodels.Scale(2)
model = (offx | scl).rename('compound_model')
tree = {
'model': model
}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=asdf_check)
@pytest.mark.slow
def test_generic_projections(tmpdir):
from astropy.io.misc.asdf.tags.transform import projections
for tag_name, (name, params, version) in projections._generic_projections.items():
tree = {
'forward': util.resolve_name(
f'astropy.modeling.projections.Sky2Pix_{name}')(),
'backward': util.resolve_name(
f'astropy.modeling.projections.Pix2Sky_{name}')()
}
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.4.2 which causes warnings
if Version(asdf.__version__) <= Version('2.5.1'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3., 0., 0.],
[0., 2., 0.],
[0., 0., 0.]])
points = ([1, 2, 3], [1, 2, 3])
model2 = astmodels.Tabular2D(points, lookup_table=table, bounds_error=False,
fill_value=None, method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_bounding_box(tmpdir):
model = astmodels.Shift(1) & astmodels.Shift(2)
model.bounding_box = ((1, 3), (2, 4))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
def test_const1d(tmpdir, standard_version):
helpers.assert_roundtrip_tree(
{"model": astmodels.Const1D(amplitude=5.)},
tmpdir,
init_options={"version": standard_version}
)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
@pytest.mark.parametrize("model", [
astmodels.Polynomial1D(1, c0=5, c1=17),
astmodels.Polynomial1D(1, c0=5, c1=17, domain=[-5, 4], window=[-2, 3]),
astmodels.Polynomial2D(2, c0_0=3, c1_0=5, c0_1=7),
astmodels.Polynomial2D(
2, c0_0=3, c1_0=5, c0_1=7, x_domain=[-2, 2], y_domain=[-4, 4],
x_window=[-6, 6], y_window=[-8, 8]
),
])
def test_polynomial(tmpdir, standard_version, model):
helpers.assert_roundtrip_tree({"model": model}, tmpdir, init_options={"version": standard_version})
def test_domain_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5, domain=[-2, 2])
model2d = astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3,
x_domain=[-2, 2], y_domain=[-2, 2])
fa = AsdfFile()
fa.tree['model1d'] = model1d
fa.tree['model2d'] = model2d
file_path = str(tmpdir.join('orthopoly_domain.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model1d'](1.8) == model1d(1.8)
assert f.tree['model2d'](1.8, -1.5) == model2d(1.8, -1.5)
def test_window_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5,
domain=[-2, 2], window=[-0.5, 0.5])
model2d = astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3,
x_domain=[-2, 2], y_domain=[-2, 2],
x_window=[-0.5, 0.5], y_window=[-0.1, 0.5])
fa = AsdfFile()
fa.tree['model1d'] = model1d
fa.tree['model2d'] = model2d
file_path = str(tmpdir.join('orthopoly_window.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model1d'](1.8) == model1d(1.8)
assert f.tree['model2d'](1.8, -1.5) == model2d(1.8, -1.5)
def test_linear1d(tmpdir):
model = astmodels.Linear1D()
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_linear1d_quantity(tmpdir):
model = astmodels.Linear1D(1*u.nm, 1*(u.nm/u.pixel))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model_units(tmpdir):
points = np.arange(0, 5) * u.pix
values = [1., 10, 2, 45, -3] * u.nm
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3., 0., 0.],
[0., 2., 0.],
[0., 0., 0.]]) * u.nm
points = ([1, 2, 3], [1, 2, 3]) * u.pix
model2 = astmodels.Tabular2D(points, lookup_table=table,
bounds_error=False, fill_value=None,
method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs(tmpdir):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.4.2 which causes warnings
if Version(asdf.__version__) <= Version('2.5.1'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
model0 = astmodels.Pix2Sky_TAN()
model0.input_units_equivalencies = {'x': u.dimensionless_angles(),
'y': u.dimensionless_angles()}
model1 = astmodels.Rotation2D()
model = model0 | model1
tree = {
'compound': fix_inputs(model, {'x': 45}),
'compound1': fix_inputs(model, {0: 45})
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs_type():
with pytest.raises(TypeError):
tree = {
'compound': fix_inputs(3, {'x': 45})
}
helpers.assert_roundtrip_tree(tree, tmpdir)
with pytest.raises(AttributeError):
tree = {
'compound': astmodels.Pix2Sky_TAN() & {'x': 45}
}
helpers.assert_roundtrip_tree(tree, tmpdir)
comp_model = custom_and_analytical_inverse()
@pytest.mark.parametrize(('model'), [astmodels.Shift(1) & astmodels.Shift(2) | comp_model,
comp_model | astmodels.Shift(1) & astmodels.Shift(2),
astmodels.Shift(1) & comp_model,
comp_model & astmodels.Shift(1)
])
def test_custom_and_analytical(model, tmpdir):
fa = AsdfFile()
fa.tree['model'] = model
file_path = str(tmpdir.join('custom_and_analytical_inverse.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model'].inverse is not None
def test_deserialize_compound_user_inverse(tmpdir):
"""
Confirm that we are able to correctly reconstruct a
compound model with a user inverse set on one of its
component models.
Due to code in TransformType that facilitates circular
inverses, the user inverse of the component model is
not available at the time that the CompoundModel is
constructed.
"""
yaml = """
model: !transform/concatenate-1.2.0
forward:
- !transform/shift-1.2.0
inverse: !transform/shift-1.2.0 {offset: 5.0}
offset: -10.0
- !transform/shift-1.2.0 {offset: -20.0}
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as af:
model = af["model"]
assert model.has_inverse()
assert model.inverse(-5, -20) == (0, 0)
# test some models and compound models with some input unit equivalencies
def models_with_input_eq():
# 1D model
m1 = astmodels.Shift(1*u.kg)
m1.input_units_equivalencies = {'x': u.mass_energy()}
# 2D model
m2 = astmodels.Const2D(10*u.Hz)
m2.input_units_equivalencies = {'x': u.dimensionless_angles(),
'y': u.dimensionless_angles()}
# 2D model with only one input equivalencies
m3 = astmodels.Const2D(10*u.Hz)
m3.input_units_equivalencies = {'x': u.dimensionless_angles()}
# model using equivalency that has args using units
m4 = astmodels.PowerLaw1D(amplitude=1*u.m, x_0=10*u.pix, alpha=7)
m4.input_units_equivalencies = {'x': u.equivalencies.pixel_scale(0.5*u.arcsec/u.pix)}
    return [m1, m2, m3, m4]
def compound_models_with_input_eq():
m1 = astmodels.Gaussian1D(10*u.K, 11*u.arcsec, 12*u.arcsec)
m1.input_units_equivalencies = {'x': u.parallax()}
m2 = astmodels.Gaussian1D(5*u.s, 2*u.K, 3*u.K)
m2.input_units_equivalencies = {'x': u.temperature()}
return [m1|m2, m1&m2, m1+m2]
test_models.extend(models_with_input_eq())
test_models.extend(compound_models_with_input_eq())
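# Added as a hedged, illustrative sketch (not part of the original suite):
# re-use the direct AsdfFile write/read pattern from test_domain_orthopoly
# above on a plain model and check that the recovered model evaluates
# identically.  Only calls already used in this file are assumed.
def test_direct_roundtrip_evaluation(tmpdir):
    model = astmodels.Rotation2D(23)
    fa = AsdfFile()
    fa.tree['model'] = model
    file_path = str(tmpdir.join('direct_roundtrip.asdf'))
    fa.write_to(file_path)
    with asdf.open(file_path) as f:
        assert np.allclose(f.tree['model'](1.0, 2.0), model(1.0, 2.0))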
|
9c7fb6fb8c8d3c705865fdd5e1597047634494d07966bc01db5e449346b3b7e2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import datetime
import pytest
from numpy.testing import assert_equal, assert_allclose
from astropy.table import Table, Column
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.units import Quantity
from astropy.utils.data import get_pkg_data_filename
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.timeseries.sampled import TimeSeries
INPUT_TIME = Time(['2016-03-22T12:30:31',
'2015-01-21T12:30:32',
'2016-03-22T12:30:40'])
PLAIN_TABLE = Table([[1, 2, 11], [3, 4, 1], [1, 1, 1]], names=['a', 'b', 'c'])
CSV_FILE = get_pkg_data_filename('data/sampled.csv')
def test_empty_initialization():
ts = TimeSeries()
ts['time'] = Time([50001, 50002, 50003], format='mjd')
def test_empty_initialization_invalid():
# Make sure things crash when the first column added is not a time column
ts = TimeSeries()
with pytest.raises(ValueError) as exc:
ts['flux'] = [1, 2, 3]
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'flux'")
def test_initialize_only_time():
ts = TimeSeries(time=INPUT_TIME)
assert ts['time'] is ts.time
# NOTE: the object in the table is a copy
assert_equal(ts.time.isot, INPUT_TIME.isot)
def test_initialization_with_data():
ts = TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert_equal(ts.time.isot, INPUT_TIME.isot)
assert_equal(ts['a'], [10, 2, 3])
assert_equal(ts['b'], [4, 5, 6])
def test_initialize_only_data():
with pytest.raises(TypeError) as exc:
TimeSeries(data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "Either 'time' or 'time_start' should be specified"
def test_initialization_with_table():
ts = TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE)
assert ts.colnames == ['time', 'a', 'b', 'c']
def test_initialization_with_time_delta():
ts = TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
time_delta=TimeDelta(3, format='sec'),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert_equal(ts.time.isot, ['2018-07-01T10:10:10.000',
'2018-07-01T10:10:13.000',
'2018-07-01T10:10:16.000'])
def test_initialization_missing_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "'time' is scalar, so 'time_delta' is required"
def test_initialization_invalid_time_and_time_start():
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, time_start=datetime(2018, 7, 1, 10, 10, 10),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "Cannot specify both 'time' and 'time_start'"
def test_initialization_invalid_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
time_delta=[1, 4, 3],
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "'time_delta' should be a Quantity or a TimeDelta"
def test_initialization_with_time_in_data():
data = PLAIN_TABLE.copy()
data['time'] = INPUT_TIME
ts1 = TimeSeries(data=data)
assert set(ts1.colnames) == set(['time', 'a', 'b', 'c'])
assert all(ts1.time == INPUT_TIME)
ts2 = TimeSeries(data=[[10, 2, 3], INPUT_TIME], names=['a', 'time'])
assert set(ts2.colnames) == set(['time', 'a'])
assert all(ts2.time == INPUT_TIME)
with pytest.raises(TypeError) as exc:
# Don't allow ambiguous cases of passing multiple 'time' columns
TimeSeries(data=data, time=INPUT_TIME)
assert exc.value.args[0] == "'time' has been given both in the table and as a keyword argument"
with pytest.raises(TypeError) as exc:
# 'time' is a protected name, don't allow ambiguous cases
TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], INPUT_TIME], names=['a', 'time'])
assert exc.value.args[0] == "'time' has been given both in the table and as a keyword argument"
def test_initialization_n_samples():
# Make sure things crash with incorrect n_samples
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE, n_samples=1000)
assert exc.value.args[0] == ("'n_samples' has been given both and it is not the "
"same length as the input data.")
def test_initialization_length_mismatch():
with pytest.raises(ValueError) as exc:
TimeSeries(time=INPUT_TIME, data=[[10, 2], [4, 5]], names=['a', 'b'])
assert exc.value.args[0] == "Length of 'time' (3) should match data length (2)"
def test_initialization_invalid_both_time_and_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, time_delta=TimeDelta(3, format='sec'))
assert exc.value.args[0] == ("'time_delta' should not be specified since "
"'time' is an array")
def test_fold():
times = Time([1, 2, 3, 8, 9, 12], format='unix')
ts = TimeSeries(time=times)
ts['flux'] = [1, 4, 4, 3, 2, 3]
    # Try without epoch time; it should default to the first time, with
    # wrapping at half the period.
tsf = ts.fold(period=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0, 1, -1.2, 0.6, -1.6, 1.4], rtol=1e-6)
# Try with epoch time
tsf = ts.fold(period=3.2 * u.s, epoch_time=Time(1.6, format='unix'))
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [-0.6, 0.4, 1.4, 0.0, 1.0, 0.8], rtol=1e-6, atol=1e-6)
# Now with wrap_phase set to the full period
tsf = ts.fold(period=3.2 * u.s, wrap_phase=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0, 1, 2, 0.6, 1.6, 1.4], rtol=1e-6)
# Now set epoch_phase to be 1/4 of the way through the phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.8 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0.8, -1.4, -0.4, 1.4, -0.8, -1.0], rtol=1e-6)
# And combining epoch_phase and wrap_phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.8 * u.s, wrap_phase=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0.8, 1.8, 2.8, 1.4, 2.4, 2.2], rtol=1e-6)
# Now repeat the above tests but with normalization applied
    # Try without epoch time; it should default to the first time, with
    # wrapping at half the period.
tsf = ts.fold(period=3.2 * u.s, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0, 1/3.2, -1.2/3.2, 0.6/3.2, -1.6/3.2, 1.4/3.2],
rtol=1e-6)
# Try with epoch time
tsf = ts.fold(period=3.2 * u.s, epoch_time=Time(1.6, format='unix'),
normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[-0.6/3.2, 0.4/3.2, 1.4/3.2, 0.0/3.2, 1.0/3.2, 0.8/3.2],
rtol=1e-6, atol=1e-6)
# Now with wrap_phase set to the full period
tsf = ts.fold(period=3.2 * u.s, wrap_phase=1, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0, 1/3.2, 2/3.2, 0.6/3.2, 1.6/3.2, 1.4/3.2],
rtol=1e-6)
# Now set epoch_phase to be 1/4 of the way through the phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.25, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0.8/3.2, -1.4/3.2, -0.4/3.2, 1.4/3.2, -0.8/3.2, -1.0/3.2],
rtol=1e-6)
# And combining epoch_phase and wrap_phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.25, wrap_phase=1,
normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0.8/3.2, 1.8/3.2, 2.8/3.2, 1.4/3.2, 2.4/3.2, 2.2/3.2],
rtol=1e-6)
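# Added as a hedged extra check (not in the original suite): folding should
# leave the data columns untouched and, with the default wrap at half the
# period, keep every phase within period/2 of zero.  Only behaviour already
# exercised in test_fold above is assumed.
def test_fold_preserves_columns():
    times = Time([1, 2, 3, 8, 9, 12], format='unix')
    ts = TimeSeries(time=times)
    ts['flux'] = [1, 4, 4, 3, 2, 3]
    tsf = ts.fold(period=3.2 * u.s)
    assert_equal(tsf['flux'], ts['flux'])
    assert all(abs(p) <= 1.6 + 1e-9 for p in tsf.time.sec)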
def test_fold_invalid_options():
times = Time([1, 2, 3, 8, 9, 12], format='unix')
ts = TimeSeries(time=times)
ts['flux'] = [1, 4, 4, 3, 2, 3]
with pytest.raises(u.UnitsError,
match='period should be a Quantity in units of time'):
ts.fold(period=3.2)
with pytest.raises(u.UnitsError,
match='period should be a Quantity in units of time'):
ts.fold(period=3.2 * u.m)
with pytest.raises(u.UnitsError,
match='epoch_phase should be a Quantity in units of '
'time when normalize_phase=False'):
ts.fold(period=3.2 * u.s, epoch_phase=0.2)
with pytest.raises(u.UnitsError,
match='epoch_phase should be a dimensionless Quantity '
'or a float when normalize_phase=True'):
ts.fold(period=3.2 * u.s, epoch_phase=0.2 * u.s, normalize_phase=True)
with pytest.raises(u.UnitsError,
match='wrap_phase should be a Quantity in units of '
'time when normalize_phase=False'):
ts.fold(period=3.2 * u.s, wrap_phase=0.2)
with pytest.raises(u.UnitsError,
match='wrap_phase should be dimensionless when '
'normalize_phase=True'):
ts.fold(period=3.2 * u.s, wrap_phase=0.2 * u.s, normalize_phase=True)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and the period'):
ts.fold(period=3.2 * u.s, wrap_phase=-0.1 * u.s)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and the period'):
ts.fold(period=3.2 * u.s, wrap_phase=-4.2 * u.s)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and 1'):
ts.fold(period=3.2 * u.s, wrap_phase=-0.1, normalize_phase=True)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and 1'):
ts.fold(period=3.2 * u.s, wrap_phase=2.2, normalize_phase=True)
def test_pandas():
pandas = pytest.importorskip("pandas")
df1 = pandas.DataFrame()
df1['a'] = [1, 2, 3]
df1.set_index(pandas.DatetimeIndex(INPUT_TIME.datetime64), inplace=True)
ts = TimeSeries.from_pandas(df1)
assert_equal(ts.time.isot, INPUT_TIME.isot)
assert ts.colnames == ['time', 'a']
assert len(ts.indices) == 1
assert (ts.indices['time'].columns[0] == INPUT_TIME).all()
ts_tcb = TimeSeries.from_pandas(df1, time_scale='tcb')
assert ts_tcb.time.scale == 'tcb'
df2 = ts.to_pandas()
assert (df2.index.values == pandas.Index(INPUT_TIME.datetime64).values).all()
assert df2.columns == pandas.Index(['a'])
assert (df1['a'] == df2['a']).all()
with pytest.raises(TypeError) as exc:
TimeSeries.from_pandas(None)
assert exc.value.args[0] == 'Input should be a pandas DataFrame'
df4 = pandas.DataFrame()
df4['a'] = [1, 2, 3]
with pytest.raises(TypeError) as exc:
TimeSeries.from_pandas(df4)
assert exc.value.args[0] == 'DataFrame does not have a DatetimeIndex'
def test_read_time_missing():
with pytest.raises(ValueError) as exc:
TimeSeries.read(CSV_FILE, format='csv')
assert exc.value.args[0] == '``time_column`` should be provided since the default Table readers are being used.'
def test_read_time_wrong():
with pytest.raises(ValueError) as exc:
TimeSeries.read(CSV_FILE, time_column='abc', format='csv')
assert exc.value.args[0] == "Time column 'abc' not found in the input data."
def test_read():
timeseries = TimeSeries.read(CSV_FILE, time_column='Date', format='csv')
assert timeseries.colnames == ['time', 'A', 'B', 'C', 'D', 'E', 'F', 'G']
assert len(timeseries) == 11
assert timeseries['time'].format == 'iso'
assert timeseries['A'].sum() == 266.5
@pytest.mark.remote_data(source='astropy')
def test_kepler_astropy():
from astropy.units import UnitsWarning
filename = get_pkg_data_filename('timeseries/kplr010666592-2009131110544_slc.fits')
with pytest.warns(UnitsWarning):
timeseries = TimeSeries.read(filename, format='kepler.fits')
assert timeseries["time"].format == 'isot'
assert timeseries["time"].scale == 'tdb'
assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
assert len(timeseries) == 14280
assert len(timeseries.columns) == 20
@pytest.mark.remote_data(source='astropy')
def test_tess_astropy():
filename = get_pkg_data_filename('timeseries/hlsp_tess-data-alerts_tess_phot_00025155310-s01_tess_v1_lc.fits')
with pytest.warns(UserWarning, match='Ignoring 815 rows with NaN times'):
timeseries = TimeSeries.read(filename, format='tess.fits')
assert timeseries["time"].format == 'isot'
assert timeseries["time"].scale == 'tdb'
assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
assert len(timeseries) == 19261
assert len(timeseries.columns) == 20
def test_required_columns():
# Test the machinery that makes sure that the required columns are present
ts = TimeSeries(time=INPUT_TIME,
data=[[10, 2, 3], [4, 5, 6]],
names=['a', 'b'])
# In the examples below, the operation (e.g. remove_column) is actually
# carried out before the checks are made, so we need to use copy() so that
# we don't change the main version of the time series.
# Make sure copy works fine
ts.copy()
with pytest.raises(ValueError) as exc:
ts.copy().add_column(Column([3, 4, 5], name='c'), index=0)
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'c'")
with pytest.raises(ValueError) as exc:
ts.copy().add_columns([Column([3, 4, 5], name='d'),
Column([3, 4, 5], name='e')], indexes=[0, 1])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'd'")
with pytest.raises(ValueError) as exc:
ts.copy().keep_columns(['a', 'b'])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'a'")
with pytest.raises(ValueError) as exc:
ts.copy().remove_column('time')
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'a'")
with pytest.raises(ValueError) as exc:
ts.copy().remove_columns(['time', 'a'])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'b'")
with pytest.raises(ValueError) as exc:
ts.copy().rename_column('time', 'banana')
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'banana'")
# https://github.com/astropy/astropy/issues/13009
ts_2cols_required = ts.copy()
ts_2cols_required._required_columns = ['time', 'a']
with pytest.raises(ValueError) as exc:
ts_2cols_required.remove_column('a')
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"['time', 'a'] as the first columns but found ['time', 'b']")
@pytest.mark.parametrize('cls', [BoxLeastSquares, LombScargle])
def test_periodogram(cls):
# Note that we don't need to check the actual results from the periodogram
# classes here since these are tested extensively in
# astropy.timeseries.periodograms.
ts = TimeSeries(time=INPUT_TIME,
data=[[10, 2, 3], [4, 5, 6]],
names=['a', 'b'])
p1 = cls.from_timeseries(ts, 'a')
assert isinstance(p1, cls)
assert_allclose(p1.t.jd, ts.time.jd)
assert_equal(p1.y, ts['a'])
assert p1.dy is None
p2 = cls.from_timeseries(ts, 'a', uncertainty='b')
assert_quantity_allclose(p2.dy, ts['b'])
p3 = cls.from_timeseries(ts, 'a', uncertainty=0.1)
assert_allclose(p3.dy, 0.1)
|
02d4ad1bee15f0358377837ae5423c76c623c4062dc6949100c829753400c554 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"]
import numpy as np
from astropy import units
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms.lombscargle.core import has_units, strip_units
from astropy import units as u
from . import methods
from astropy.timeseries.periodograms.base import BasePeriodogram
def validate_unit_consistency(reference_object, input_object):
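    # Coerce ``input_object`` to the units of ``reference_object``.  When the
    # reference carries no units, a unitful input must be convertible to
    # dimensionless and its bare value is returned.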
if has_units(reference_object):
input_object = units.Quantity(input_object, unit=reference_object.unit)
else:
if has_units(input_object):
input_object = units.Quantity(input_object, unit=units.one)
input_object = input_object.value
return input_object
class BoxLeastSquares(BasePeriodogram):
"""Compute the box least squares periodogram
This method is a commonly used tool for discovering transiting exoplanets
or eclipsing binaries in photometric time series datasets. This
implementation is based on the "box least squares (BLS)" method described
in [1]_ and [2]_.
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times ``t``.
dy : float, array-like, or `~astropy.units.Quantity`, optional
Error or sequence of observational errors associated with times ``t``.
Examples
--------
Generate noisy data with a transit:
>>> rand = np.random.default_rng(42)
>>> t = rand.uniform(0, 10, 500)
>>> y = np.ones_like(t)
>>> y[np.abs((t + 1.0)%2.0-1)<0.08] = 1.0 - 0.1
>>> y += 0.01 * rand.standard_normal(len(t))
Compute the transit periodogram on a heuristically determined period grid
and find the period with maximum power:
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16)
>>> results.period[np.argmax(results.power)] # doctest: +FLOAT_CMP
2.000412388152837
Compute the periodogram on a user-specified period grid:
>>> periods = np.linspace(1.9, 2.1, 5)
>>> results = model.power(periods, 0.16)
>>> results.power # doctest: +FLOAT_CMP
array([0.01723948, 0.0643028 , 0.1338783 , 0.09428816, 0.03577543])
If the inputs are AstroPy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.day
>>> y = y * u.dimensionless_unscaled
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16 * u.day)
>>> results.period.unit
Unit("d")
>>> results.power.unit
Unit(dimensionless)
References
----------
.. [1] Kovacs, Zucker, & Mazeh (2002), A&A, 391, 369
(arXiv:astro-ph/0206099)
.. [2] Hartman & Bakos (2016), Astronomy & Computing, 17, 1
(arXiv:1605.06811)
"""
def __init__(self, t, y, dy=None):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
# so can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to('day')
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, (Time, TimeDelta)):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy)
def autoperiod(self, duration,
minimum_period=None, maximum_period=None,
minimum_n_transit=3, frequency_factor=1.0):
"""Determine a suitable grid of periods
This method uses a set of heuristics to select a conservative period
grid that is uniform in frequency. This grid might be too fine for
        some users' needs depending on the precision requirements or the
sampling of the data. The grid can be made coarser by increasing
``frequency_factor``.
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
minimum_period, maximum_period : float or `~astropy.units.Quantity` ['time'], optional
The minimum/maximum periods to search. If not provided, these will
be computed as described in the notes below.
minimum_n_transit : int, optional
If ``maximum_period`` is not provided, this is used to compute the
maximum period to search by asserting that any systems with at
            least ``minimum_n_transit`` transits will be within the range of
            searched periods. Note that this is not the same as requiring
            ``minimum_n_transit`` transits for a detection. The default
value is ``3``.
frequency_factor : float, optional
A factor to control the frequency spacing as described in the
notes below. The default value is ``1.0``.
Returns
-------
period : array-like or `~astropy.units.Quantity` ['time']
The set of periods computed using these heuristics with the same
units as ``t``.
Notes
-----
The default minimum period is chosen to be twice the maximum duration
because there won't be much sensitivity to periods shorter than that.
The default maximum period is computed as
.. code-block:: python
                maximum_period = (max(t) - min(t)) / (minimum_n_transit - 1)
        ensuring that any systems with at least ``minimum_n_transit`` transits are
within the range of searched periods.
The frequency spacing is given by
.. code-block:: python
df = frequency_factor * min(duration) / (max(t) - min(t))**2
so the grid can be made finer by decreasing ``frequency_factor`` or
coarser by increasing ``frequency_factor``.
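        For example (an illustrative case added here, with arbitrary assumed
        numbers), a 10 day baseline with a single 0.2 day test duration and
        the defaults ``minimum_n_transit=3`` and ``frequency_factor=1.0``
        gives
        .. code-block:: python
            minimum_period = 2.0 * 0.2        # 0.4 days
            maximum_period = 10.0 / (3 - 1)   # 5.0 days
            df = 1.0 * 0.2 / 10.0**2          # 0.002 cycles per day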
"""
duration = self._validate_duration(duration)
baseline = strip_units(self._trel.max() - self._trel.min())
min_duration = strip_units(np.min(duration))
# Estimate the required frequency spacing
# Because of the sparsity of a transit, this must be much finer than
# the frequency resolution for a sinusoidal fit. For a sinusoidal fit,
# df would be 1/baseline (see LombScargle), but here this should be
# scaled proportionally to the duration in units of baseline.
df = frequency_factor * min_duration / baseline**2
# If a minimum period is not provided, choose one that is twice the
# maximum duration because we won't be sensitive to any periods
# shorter than that.
if minimum_period is None:
minimum_period = 2.0 * strip_units(np.max(duration))
else:
minimum_period = validate_unit_consistency(self._trel, minimum_period)
minimum_period = strip_units(minimum_period)
# If no maximum period is provided, choose one by requiring that
# all signals with at least minimum_n_transit should be detectable.
if maximum_period is None:
if minimum_n_transit <= 1:
raise ValueError("minimum_n_transit must be greater than 1")
maximum_period = baseline / (minimum_n_transit-1)
else:
maximum_period = validate_unit_consistency(self._trel, maximum_period)
maximum_period = strip_units(maximum_period)
if maximum_period < minimum_period:
minimum_period, maximum_period = maximum_period, minimum_period
if minimum_period <= 0.0:
raise ValueError("minimum_period must be positive")
# Convert bounds to frequency
minimum_frequency = 1.0/strip_units(maximum_period)
maximum_frequency = 1.0/strip_units(minimum_period)
# Compute the number of frequencies and the frequency grid
nf = 1 + int(np.round((maximum_frequency - minimum_frequency)/df))
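        # The grid is uniform in frequency, starting at maximum_frequency and
        # stepping down by df, so inverting it yields periods in increasing
        # order with the time unit of ``t`` re-attached.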
return 1.0/(maximum_frequency-df*np.arange(nf)) * self._t_unit()
def autopower(self, duration, objective=None, method=None, oversample=10,
minimum_n_transit=3, minimum_period=None,
maximum_period=None, frequency_factor=1.0):
"""Compute the periodogram at set of heuristically determined periods
This method calls :func:`BoxLeastSquares.autoperiod` to determine
the period grid and then :func:`BoxLeastSquares.power` to compute
the periodogram. See those methods for documentation of the arguments.
"""
period = self.autoperiod(duration,
minimum_n_transit=minimum_n_transit,
minimum_period=minimum_period,
maximum_period=maximum_period,
frequency_factor=frequency_factor)
return self.power(period, duration, objective=objective, method=method,
oversample=oversample)
def power(self, period, duration, objective=None, method=None,
oversample=10):
"""Compute the periodogram for a set of periods
Parameters
----------
period : array-like or `~astropy.units.Quantity` ['time']
The periods where the power should be computed
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations to test
objective : {'likelihood', 'snr'}, optional
The scalar that should be optimized to find the best fit phase,
duration, and depth. This can be either ``'likelihood'`` (default)
to optimize the log-likelihood of the model, or ``'snr'`` to
optimize the signal-to-noise with which the transit depth is
measured.
method : {'fast', 'slow'}, optional
The computational method used to compute the periodogram. This is
mainly included for the purposes of testing and most users will
want to use the optimized ``'fast'`` method (default) that is
implemented in Cython. ``'slow'`` is a brute-force method that is
used to test the results of the ``'fast'`` method.
oversample : int, optional
The number of bins per duration that should be used. This sets the
time resolution of the phase fit with larger values of
``oversample`` yielding a finer grid and higher computational cost.
Returns
-------
results : BoxLeastSquaresResults
The periodogram results as a :class:`BoxLeastSquaresResults`
object.
Raises
------
ValueError
If ``oversample`` is not an integer greater than 0 or if
``objective`` or ``method`` are not valid.
"""
period, duration = self._validate_period_and_duration(period, duration)
# Check for absurdities in the ``oversample`` choice
try:
oversample = int(oversample)
except TypeError:
raise ValueError(f"oversample must be an int, got {oversample}")
if oversample < 1:
raise ValueError("oversample must be greater than or equal to 1")
# Select the periodogram objective
if objective is None:
objective = "likelihood"
allowed_objectives = ["snr", "likelihood"]
if objective not in allowed_objectives:
raise ValueError(("Unrecognized method '{0}'\n"
"allowed methods are: {1}")
.format(objective, allowed_objectives))
use_likelihood = (objective == "likelihood")
# Select the computational method
if method is None:
method = "fast"
allowed_methods = ["fast", "slow"]
if method not in allowed_methods:
raise ValueError(("Unrecognized method '{0}'\n"
"allowed methods are: {1}")
.format(method, allowed_methods))
# Format and check the input arrays
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
t_ref = np.min(t)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# Make sure that the period and duration arrays are C-order
period_fmt = np.ascontiguousarray(strip_units(period),
dtype=np.float64)
duration = np.ascontiguousarray(strip_units(duration),
dtype=np.float64)
# Select the correct implementation for the chosen method
if method == "fast":
bls = methods.bls_fast
else:
bls = methods.bls_slow
# Run the implementation
results = bls(
t - t_ref, y - np.median(y), ivar, period_fmt, duration,
oversample, use_likelihood)
return self._format_results(t_ref, objective, period, results)
def _as_relative_time(self, name, times):
"""
Convert the provided times (if absolute) to relative times using the
current _tstart value. If the times provided are relative, they are
returned without conversion (though we still do some checks).
"""
if isinstance(times, TimeDelta):
times = times.to('day')
if self._tstart is None:
if isinstance(times, Time):
raise TypeError('{} was provided as an absolute time but '
'the BoxLeastSquares class was initialized '
'with relative times.'.format(name))
else:
if isinstance(times, Time):
times = (times - self._tstart).to(u.day)
else:
raise TypeError('{} was provided as a relative time but '
'the BoxLeastSquares class was initialized '
'with absolute times.'.format(name))
times = validate_unit_consistency(self._trel, times)
return times
def _as_absolute_time_if_needed(self, name, times):
"""
Convert the provided times to absolute times using the current _tstart
value, if needed.
"""
if self._tstart is not None:
# Some time formats/scales can't represent dates/times too far
# off from the present, so we need to mask values offset by
# more than 100,000 yr (the periodogram algorithm can return
            # transit times of e.g. 1e300 for some periods).
reset = np.abs(times.to_value(u.year)) > 100000
times[reset] = 0
times = self._tstart + times
times[reset] = np.nan
return times
def model(self, t_model, period, duration, transit_time):
"""Compute the transit model at the given period, duration, and phase
Parameters
----------
t_model : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
Times at which to compute the model.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
y_model : array-like or `~astropy.units.Quantity`
The model evaluated at the times ``t_model`` with units of ``y``.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
t_model = strip_units(self._as_relative_time('t_model', t_model))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# Compute the depth
hp = 0.5*period
m_in = np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
m_out = ~m_in
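        # The in-transit and out-of-transit levels below are inverse-variance
        # weighted means of the corresponding data points.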
y_in = np.sum(y[m_in] * ivar[m_in]) / np.sum(ivar[m_in])
y_out = np.sum(y[m_out] * ivar[m_out]) / np.sum(ivar[m_out])
# Evaluate the model
y_model = y_out + np.zeros_like(t_model)
m_model = np.abs((t_model-transit_time+hp) % period-hp) < 0.5*duration
y_model[m_model] = y_in
return y_model * self._y_unit()
def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
        # This is a helper function that will compute the depth for several
# different hypothesized transit models with different parameters
def _compute_depth(m, y_out=None, var_out=None):
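            # Returns the (inverse-variance weighted mean, variance of the
            # mean) for the selected points, or, when a reference level
            # ``y_out`` is given, the depth relative to it and the propagated
            # uncertainty.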
if np.any(m) and (var_out is None or np.isfinite(var_out)):
var_m = 1.0 / np.sum(ivar[m])
y_m = np.sum(y[m] * ivar[m]) * var_m
if y_out is None:
return y_m, var_m
return y_out - y_m, np.sqrt(var_m + var_out)
return 0.0, np.inf
# Compute the depth of the fiducial model and the two models at twice
# the period
hp = 0.5*period
m_in = np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
m_out = ~m_in
m_odd = np.abs((t-transit_time) % (2*period) - period) \
< 0.5*duration
m_even = np.abs((t-transit_time+period) % (2*period) - period) \
< 0.5*duration
y_out, var_out = _compute_depth(m_out)
depth = _compute_depth(m_in, y_out, var_out)
depth_odd = _compute_depth(m_odd, y_out, var_out)
depth_even = _compute_depth(m_even, y_out, var_out)
y_in = y_out - depth[0]
# Compute the depth of the model at a phase of 0.5*period
m_phase = np.abs((t-transit_time) % period - hp) < 0.5*duration
depth_phase = _compute_depth(m_phase,
*_compute_depth((~m_phase) & m_out))
# Compute the depth of a model with a period of 0.5*period
m_half = np.abs((t-transit_time+0.25*period) % (0.5*period)
- 0.25*period) < 0.5*duration
depth_half = _compute_depth(m_half, *_compute_depth(~m_half))
# Compute the number of points in each transit
transit_id = np.round((t[m_in]-transit_time) / period).astype(int)
transit_times = period * np.arange(transit_id.min(),
transit_id.max()+1) + transit_time
unique_ids, unique_counts = np.unique(transit_id,
return_counts=True)
unique_ids -= np.min(transit_id)
transit_id -= np.min(transit_id)
counts = np.zeros(np.max(transit_id) + 1, dtype=int)
counts[unique_ids] = unique_counts
# Compute the per-transit log likelihood
ll = -0.5 * ivar[m_in] * ((y[m_in] - y_in)**2 - (y[m_in] - y_out)**2)
lls = np.zeros(len(counts))
for i in unique_ids:
lls[i] = np.sum(ll[transit_id == i])
full_ll = -0.5*np.sum(ivar[m_in] * (y[m_in] - y_in)**2)
full_ll -= 0.5*np.sum(ivar[m_out] * (y[m_out] - y_out)**2)
# Compute the log likelihood of a sine model
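        # (a weighted linear least-squares fit of a single sinusoid plus a
        # constant offset, solved through the normal equations
        # A^T W A w = A^T W y with W = diag(ivar))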
A = np.vstack((
np.sin(2*np.pi*t/period), np.cos(2*np.pi*t/period),
np.ones_like(t)
)).T
w = np.linalg.solve(np.dot(A.T, A * ivar[:, None]),
np.dot(A.T, y * ivar))
mod = np.dot(A, w)
sin_ll = -0.5*np.sum((y-mod)**2*ivar)
# Format the results
y_unit = self._y_unit()
ll_unit = 1
if self.dy is None:
ll_unit = y_unit * y_unit
return dict(
transit_times=self._as_absolute_time_if_needed('transit_times', transit_times * self._t_unit()),
per_transit_count=counts,
per_transit_log_likelihood=lls * ll_unit,
depth=(depth[0] * y_unit, depth[1] * y_unit),
depth_phased=(depth_phase[0] * y_unit, depth_phase[1] * y_unit),
depth_half=(depth_half[0] * y_unit, depth_half[1] * y_unit),
depth_odd=(depth_odd[0] * y_unit, depth_odd[1] * y_unit),
depth_even=(depth_even[0] * y_unit, depth_even[1] * y_unit),
harmonic_amplitude=np.sqrt(np.sum(w[:2]**2)) * y_unit,
harmonic_delta_log_likelihood=(sin_ll - full_ll) * ll_unit,
)
def transit_mask(self, t, period, duration, transit_time):
"""Compute which data points are in transit for a given parameter set
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times where the mask should be evaluated.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
transit_mask : array-like
            A boolean array where ``True`` indicates an in-transit point and
            ``False`` indicates an out-of-transit point.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
t = strip_units(self._as_relative_time('t', t))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
hp = 0.5*period
return np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
def _validate_inputs(self, t, y, dy):
"""Private method used to check the consistency of the inputs
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times t.
dy : float, array-like, or `~astropy.units.Quantity`
Error or sequence of observational errors associated with times t.
Returns
-------
t, y, dy : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The inputs with consistent shapes and units.
Raises
------
ValueError
If the dimensions are incompatible or if the units of dy cannot be
converted to the units of y.
"""
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if dy is not None:
dy = validate_unit_consistency(y, dy)
return t, y, dy
def _validate_duration(self, duration):
"""Private method used to check a set of test durations
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity`
The set of durations that will be considered.
Returns
-------
duration : array-like or `~astropy.units.Quantity`
The input reformatted with the correct shape and units.
Raises
------
ValueError
If the units of duration cannot be converted to the units of t.
"""
duration = np.atleast_1d(np.abs(duration))
if duration.ndim != 1 or duration.size == 0:
raise ValueError("duration must be 1-dimensional")
return validate_unit_consistency(self._trel, duration)
def _validate_period_and_duration(self, period, duration):
"""Private method used to check a set of periods and durations
Parameters
----------
period : float, array-like, or `~astropy.units.Quantity` ['time']
The set of test periods.
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
Returns
-------
period, duration : array-like or `~astropy.units.Quantity` ['time']
The inputs reformatted with the correct shapes and units.
Raises
------
ValueError
If the units of period or duration cannot be converted to the
units of t.
"""
duration = self._validate_duration(duration)
period = np.atleast_1d(np.abs(period))
if period.ndim != 1 or period.size == 0:
raise ValueError("period must be 1-dimensional")
period = validate_unit_consistency(self._trel, period)
if not np.min(period) > np.max(duration):
raise ValueError("The maximum transit duration must be shorter "
"than the minimum period")
return period, duration
def _format_results(self, t_ref, objective, period, results):
"""A private method used to wrap and add units to the periodogram
Parameters
----------
t_ref : float
The minimum time in the time series (a reference time).
objective : str
The name of the objective used in the optimization.
period : array-like or `~astropy.units.Quantity` ['time']
The set of trial periods.
results : tuple
The output of one of the periodogram implementations.
"""
(power, depth, depth_err, duration, transit_time, depth_snr,
log_likelihood) = results
transit_time += t_ref
if has_units(self._trel):
transit_time = units.Quantity(transit_time, unit=self._trel.unit)
transit_time = self._as_absolute_time_if_needed('transit_time', transit_time)
duration = units.Quantity(duration, unit=self._trel.unit)
if has_units(self.y):
depth = units.Quantity(depth, unit=self.y.unit)
depth_err = units.Quantity(depth_err, unit=self.y.unit)
depth_snr = units.Quantity(depth_snr, unit=units.one)
if self.dy is None:
if objective == "likelihood":
power = units.Quantity(power, unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood,
unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood, unit=units.one)
return BoxLeastSquaresResults(
objective, period, power, depth, depth_err, duration, transit_time,
depth_snr, log_likelihood)
def _t_unit(self):
if has_units(self._trel):
return self._trel.unit
else:
return 1
def _y_unit(self):
if has_units(self.y):
return self.y.unit
else:
return 1
class BoxLeastSquaresResults(dict):
"""The results of a BoxLeastSquares search
Attributes
----------
objective : str
The scalar used to optimize to find the best fit phase, duration, and
depth. See :func:`BoxLeastSquares.power` for more information.
period : array-like or `~astropy.units.Quantity` ['time']
The set of test periods.
power : array-like or `~astropy.units.Quantity`
The periodogram evaluated at the periods in ``period``. If
``objective`` is:
* ``'likelihood'``: the values of ``power`` are the
log likelihood maximized over phase, depth, and duration, or
* ``'snr'``: the values of ``power`` are the signal-to-noise with
which the depth is measured maximized over phase, depth, and
duration.
depth : array-like or `~astropy.units.Quantity`
The estimated depth of the maximum power model at each period.
depth_err : array-like or `~astropy.units.Quantity`
The 1-sigma uncertainty on ``depth``.
duration : array-like or `~astropy.units.Quantity` ['time']
The maximum power duration at each period.
transit_time : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like or `~astropy.units.Quantity`
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like or `~astropy.units.Quantity`
The log likelihood of the maximum power model.
"""
def __init__(self, *args):
super().__init__(zip(
("objective", "period", "power", "depth", "depth_err",
"duration", "transit_time", "depth_snr", "log_likelihood"),
args
))
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
|
9a5d66b20e38744b035823fa6eb12dad83a620038191a6bd5dbe527e28ee1bf1 | """Main Lomb-Scargle Implementation"""
import numpy as np
from .implementations import lombscargle, available_methods
from .implementations.mle import periodic_fit, design_matrix
from . import _statistics
from astropy import units
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.timeseries.periodograms.base import BasePeriodogram
def has_units(obj):
return hasattr(obj, 'unit')
def get_unit(obj):
return getattr(obj, 'unit', 1)
def strip_units(*arrs):
strip = lambda a: None if a is None else np.asarray(a)
if len(arrs) == 1:
return strip(arrs[0])
else:
return map(strip, arrs)
class LombScargle(BasePeriodogram):
"""Compute the Lomb-Scargle Periodogram.
    The implementations here are based on code presented in [1]_ and [2]_;
if you use this functionality in an academic application, citation of
those works would be appreciated.
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
sequence of observation times
y : array-like or `~astropy.units.Quantity`
sequence of observations associated with times t
dy : float, array-like, or `~astropy.units.Quantity`, optional
error or sequence of observational errors associated with times t
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if fit_mean = False
nterms : int, optional
number of terms to use in the Fourier fit
normalization : {'standard', 'model', 'log', 'psd'}, optional
Normalization to use for the periodogram.
Examples
--------
Generate noisy periodic data:
>>> rand = np.random.default_rng(42)
>>> t = 100 * rand.random(100)
>>> y = np.sin(2 * np.pi * t) + rand.standard_normal(100)
Compute the Lomb-Scargle periodogram on an automatically-determined
frequency grid & find the frequency of max power:
>>> frequency, power = LombScargle(t, y).autopower()
>>> frequency[np.argmax(power)] # doctest: +FLOAT_CMP
1.0007641728995051
Compute the Lomb-Scargle periodogram at a user-specified frequency grid:
>>> freq = np.arange(0.8, 1.3, 0.1)
>>> LombScargle(t, y).power(freq) # doctest: +FLOAT_CMP
array([0.0792948 , 0.01778874, 0.25328167, 0.01064157, 0.01471387])
If the inputs are astropy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.s
>>> y = y * u.mag
>>> frequency, power = LombScargle(t, y).autopower()
>>> frequency.unit
Unit("1 / s")
>>> power.unit
Unit(dimensionless)
Note here that the Lomb-Scargle power is always a unitless quantity,
because it is related to the :math:`\\chi^2` of the best-fit periodic
model at each frequency.
References
----------
.. [1] Vanderplas, J., Connolly, A. Ivezic, Z. & Gray, A. *Introduction to
astroML: Machine learning for astrophysics*. Proceedings of the
Conference on Intelligent Data Understanding (2012)
.. [2] VanderPlas, J. & Ivezic, Z. *Periodograms for Multiband Astronomical
Time Series*. ApJ 812.1:18 (2015)
"""
available_methods = available_methods()
def __init__(self, t, y, dy=None, fit_mean=True, center_data=True,
nterms=1, normalization='standard'):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
# so can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to('day')
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, Time):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy)
self.fit_mean = fit_mean
self.center_data = center_data
self.nterms = nterms
self.normalization = normalization
def _validate_inputs(self, t, y, dy):
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if any(has_units(arr) for arr in (t, y, dy)):
t, y = map(units.Quantity, (t, y))
if dy is not None:
dy = units.Quantity(dy)
try:
dy = units.Quantity(dy, unit=y.unit)
except units.UnitConversionError:
raise ValueError("Units of dy not equivalent "
"to units of y")
return t, y, dy
def _validate_frequency(self, frequency):
frequency = np.asanyarray(frequency)
if has_units(self._trel):
frequency = units.Quantity(frequency)
try:
frequency = units.Quantity(frequency, unit=1./self._trel.unit)
except units.UnitConversionError:
raise ValueError("Units of frequency not equivalent to "
"units of 1/t")
else:
if has_units(frequency):
raise ValueError("frequency have units while 1/t doesn't.")
return frequency
def _validate_t(self, t):
t = np.asanyarray(t)
if has_units(self._trel):
t = units.Quantity(t)
try:
t = units.Quantity(t, unit=self._trel.unit)
except units.UnitConversionError:
raise ValueError("Units of t not equivalent to "
"units of input self.t")
return t
def _power_unit(self, norm):
if has_units(self.y):
if self.dy is None and norm == 'psd':
return self.y.unit ** 2
else:
return units.dimensionless_unscaled
else:
return 1
def autofrequency(self, samples_per_peak=5, nyquist_factor=5,
minimum_frequency=None, maximum_frequency=None,
return_freq_limits=False):
"""Determine a suitable frequency grid for data.
Note that this assumes the peak width is driven by the observational
baseline, which is generally a good assumption when the baseline is
much larger than the oscillation period.
If you are searching for periods longer than the baseline of your
observations, this may not perform well.
Even with a large baseline, be aware that the maximum frequency
returned is based on the concept of "average Nyquist frequency", which
may not be useful for irregularly-sampled data. The maximum frequency
can be adjusted via the nyquist_factor argument, or through the
maximum_frequency argument.
Parameters
----------
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float, optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float, optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
return_freq_limits : bool, optional
if True, return only the frequency limits rather than the full
frequency grid.
Returns
-------
frequency : ndarray or `~astropy.units.Quantity` ['frequency']
            The heuristically-determined optimal frequency grid.
"""
baseline = self._trel.max() - self._trel.min()
n_samples = self._trel.size
df = 1.0 / baseline / samples_per_peak
if minimum_frequency is None:
minimum_frequency = 0.5 * df
if maximum_frequency is None:
avg_nyquist = 0.5 * n_samples / baseline
maximum_frequency = nyquist_factor * avg_nyquist
Nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df))
if return_freq_limits:
return minimum_frequency, minimum_frequency + df * (Nf - 1)
else:
return minimum_frequency + df * np.arange(Nf)
def autopower(self, method='auto', method_kwds=None,
normalization=None, samples_per_peak=5,
nyquist_factor=5, minimum_frequency=None,
maximum_frequency=None):
"""Compute Lomb-Scargle power at automatically-determined frequencies.
Parameters
----------
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
maximum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
Returns
-------
frequency, power : ndarray
The frequency and Lomb-Scargle power
"""
frequency = self.autofrequency(samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency)
power = self.power(frequency,
normalization=normalization,
method=method, method_kwds=method_kwds,
assume_regular_frequency=True)
return frequency, power
def power(self, frequency, normalization=None, method='auto',
assume_regular_frequency=False, method_kwds=None):
"""Compute the Lomb-Scargle power at the given frequencies.
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly-spaced.
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool, optional
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
Returns
-------
power : ndarray
The Lomb-Scargle power at the specified frequency
"""
if normalization is None:
normalization = self.normalization
frequency = self._validate_frequency(frequency)
power = lombscargle(*strip_units(self._trel, self.y, self.dy),
frequency=strip_units(frequency),
center_data=self.center_data,
fit_mean=self.fit_mean,
nterms=self.nterms,
normalization=normalization,
method=method, method_kwds=method_kwds,
assume_regular_frequency=assume_regular_frequency)
return power * self._power_unit(normalization)
def _as_relative_time(self, name, times):
"""
Convert the provided times (if absolute) to relative times using the
current _tstart value. If the times provided are relative, they are
returned without conversion (though we still do some checks).
"""
if isinstance(times, TimeDelta):
times = times.to('day')
if self._tstart is None:
if isinstance(times, Time):
raise TypeError('{} was provided as an absolute time but '
'the LombScargle class was initialized '
'with relative times.'.format(name))
else:
if isinstance(times, Time):
times = (times - self._tstart).to(u.day)
else:
raise TypeError('{} was provided as a relative time but '
'the LombScargle class was initialized '
'with absolute times.'.format(name))
return times
def model(self, t, frequency):
"""Compute the Lomb-Scargle model at the given frequency.
The model at a particular frequency is a linear model:
model = offset + dot(design_matrix, model_parameters)
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times (length ``n_samples``) at which to compute the model.
frequency : float
the frequency for the model
Returns
-------
y : np.ndarray
The model fit corresponding to the input times
(will have length ``n_samples``).
See Also
--------
design_matrix
offset
model_parameters
"""
frequency = self._validate_frequency(frequency)
t = self._validate_t(self._as_relative_time('t', t))
y_fit = periodic_fit(*strip_units(self._trel, self.y, self.dy),
frequency=strip_units(frequency),
t_fit=strip_units(t),
center_data=self.center_data,
fit_mean=self.fit_mean,
nterms=self.nterms)
return y_fit * get_unit(self.y)
def offset(self):
"""Return the offset of the model
The offset of the model is the (weighted) mean of the y values.
Note that if self.center_data is False, the offset is 0 by definition.
Returns
-------
offset : scalar
See Also
--------
design_matrix
model
model_parameters
"""
y, dy = strip_units(self.y, self.dy)
if dy is None:
dy = 1
dy = np.broadcast_to(dy, y.shape)
if self.center_data:
w = dy ** -2.0
y_mean = np.dot(y, w) / w.sum()
else:
y_mean = 0
return y_mean * get_unit(self.y)
def model_parameters(self, frequency, units=True):
r"""Compute the best-fit model parameters at the given frequency.
The model described by these parameters is:
.. math::
y(t; f, \vec{\theta}) = \theta_0 + \sum_{n=1}^{\tt nterms} [\theta_{2n-1}\sin(2\pi n f t) + \theta_{2n}\cos(2\pi n f t)]
where :math:`\vec{\theta}` is the array of parameters returned by this function.
Parameters
----------
frequency : float
the frequency for the model
units : bool
            If True (default), return the parameters in the units of the data.
Returns
-------
theta : np.ndarray (n_parameters,)
The best-fit model parameters at the given frequency.
See Also
--------
design_matrix
model
offset
"""
frequency = self._validate_frequency(frequency)
t, y, dy = strip_units(self._trel, self.y, self.dy)
if self.center_data:
y = y - strip_units(self.offset())
dy = np.ones_like(y) if dy is None else np.asarray(dy)
X = self.design_matrix(frequency)
parameters = np.linalg.solve(np.dot(X.T, X),
np.dot(X.T, y / dy))
if units:
parameters = get_unit(self.y) * parameters
return parameters
def design_matrix(self, frequency, t=None):
"""Compute the design matrix for a given frequency
Parameters
----------
frequency : float
the frequency for the model
t : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` (optional)
Times (length ``n_samples``) at which to compute the model.
If not specified, then the times and uncertainties of the input
data are used.
Returns
-------
X : array
The design matrix for the model at the given frequency.
This should have a shape of (``len(t)``, ``n_parameters``).
See Also
--------
model
model_parameters
offset
"""
if t is None:
t, dy = strip_units(self._trel, self.dy)
else:
t, dy = strip_units(self._validate_t(self._as_relative_time('t', t)), None)
return design_matrix(t, frequency, dy,
nterms=self.nterms,
bias=self.fit_mean)
def distribution(self, power, cumulative=False):
"""Expected periodogram distribution under the null hypothesis.
This computes the expected probability distribution or cumulative
probability distribution of periodogram power, under the null
hypothesis of a non-varying signal with Gaussian noise. Note that
this is not the same as the expected distribution of peak values;
for that see the ``false_alarm_probability()`` method.
Parameters
----------
power : array-like
The periodogram power at which to compute the distribution.
cumulative : bool, optional
If True, then return the cumulative distribution.
See Also
--------
false_alarm_probability
false_alarm_level
Returns
-------
dist : np.ndarray
The probability density or cumulative probability associated with
the provided powers.
"""
dH = 1 if self.fit_mean or self.center_data else 0
dK = dH + 2 * self.nterms
dist = _statistics.cdf_single if cumulative else _statistics.pdf_single
return dist(power, len(self._trel), self.normalization, dH=dH, dK=dK)
def false_alarm_probability(self, power, method='baluev',
samples_per_peak=5, nyquist_factor=5,
minimum_frequency=None, maximum_frequency=None,
method_kwds=None):
"""False alarm probability of periodogram maxima under the null hypothesis.
This gives an estimate of the false alarm probability given the height
of the largest peak in the periodogram, based on the null hypothesis
of non-varying data with Gaussian noise.
Parameters
----------
power : array-like
The periodogram value.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use.
        samples_per_peak : float, optional
            The approximate number of desired samples across the typical peak.
        nyquist_factor : float, optional
            The multiple of the average nyquist frequency used to choose the
            maximum frequency if maximum_frequency is not provided.
        minimum_frequency : float, optional
            If specified, then use this minimum frequency rather than one
            chosen based on the size of the baseline.
        maximum_frequency : float, optional
            If specified, then use this maximum frequency rather than one
            chosen based on the average nyquist frequency.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
false_alarm_probability : np.ndarray
The false alarm probability
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data.
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_level
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if self.nterms != 1:
raise NotImplementedError("false alarm probability is not "
"implemented for multiterm periodograms.")
if not (self.fit_mean or self.center_data):
raise NotImplementedError("false alarm probability is implemented "
"only for periodograms of centered data.")
fmin, fmax = self.autofrequency(samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
return_freq_limits=True)
return _statistics.false_alarm_probability(power,
fmax=fmax,
t=self._trel, y=self.y, dy=self.dy,
normalization=self.normalization,
method=method,
method_kwds=method_kwds)
def false_alarm_level(self, false_alarm_probability, method='baluev',
samples_per_peak=5, nyquist_factor=5,
minimum_frequency=None, maximum_frequency=None,
method_kwds=None):
"""Level of maximum at a given false alarm probability.
This gives an estimate of the periodogram level corresponding to a
specified false alarm probability for the largest peak, assuming a
null hypothesis of non-varying data with Gaussian noise.
Parameters
----------
false_alarm_probability : array-like
The false alarm probability (0 < fap < 1).
        samples_per_peak : float, optional
            The approximate number of desired samples across the typical peak.
        nyquist_factor : float, optional
            The multiple of the average nyquist frequency used to choose the
            maximum frequency if maximum_frequency is not provided.
        minimum_frequency : float, optional
            If specified, then use this minimum frequency rather than one
            chosen based on the size of the baseline.
        maximum_frequency : float, optional
            If specified, then use this maximum frequency rather than one
            chosen based on the average nyquist frequency.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use; default='baluev'.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
power : np.ndarray
The periodogram peak height corresponding to the specified
false alarm probability.
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data. The number of samples can
be set with the method-specific keyword "n_bootstraps" (default=1000).
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_probability
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if self.nterms != 1:
raise NotImplementedError("false alarm probability is not "
"implemented for multiterm periodograms.")
if not (self.fit_mean or self.center_data):
raise NotImplementedError("false alarm probability is implemented "
"only for periodograms of centered data.")
fmin, fmax = self.autofrequency(samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
return_freq_limits=True)
return _statistics.false_alarm_level(false_alarm_probability,
fmax=fmax,
t=self._trel, y=self.y, dy=self.dy,
normalization=self.normalization,
method=method,
method_kwds=method_kwds)
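# --- Illustrative usage sketch (editor addition, not part of astropy) --------
# A minimal, hedged example of the class defined above, assuming it is exposed
# as ``LombScargle`` (as the error messages above indicate).  It builds a noisy
# sinusoid, computes the periodogram on the heuristic frequency grid from
# ``autopower``, and evaluates the false-alarm probability of the highest peak.
def _demo_lombscargle_usage():
    import numpy as np
    rng = np.random.default_rng(42)
    t = 100 * rng.random(200)                     # irregular sampling over ~100 days
    y = 1.5 * np.sin(2 * np.pi * t / 7.3) + rng.normal(0, 0.5, t.size)
    dy = 0.5 * np.ones_like(t)
    ls = LombScargle(t, y, dy)                    # class defined above
    frequency, power = ls.autopower()             # heuristic frequency grid
    best_frequency = frequency[np.argmax(power)]  # strongest periodicity
    fap = ls.false_alarm_probability(power.max())
    return best_frequency, fap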
|
7e9f17b60939bec98dcb4048292271465fcd113842034f86b27b48f2d6b0d93e |
import numpy as np
def lombscargle_scipy(t, y, frequency, normalization='standard',
center_data=True):
"""Lomb-Scargle Periodogram
This is a wrapper of ``scipy.signal.lombscargle`` for computation of the
Lomb-Scargle periodogram. This is a relatively fast version of the naive
O[N^2] algorithm, but cannot handle heteroskedastic errors.
Parameters
----------
t, y : array-like
        times and values of the data points. These should be
        broadcastable to the same shape. Neither should be a
        `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
center_data : bool, optional
        if True, pre-center the data by subtracting the mean
of the input data.
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. 1982, ApJ 263:835-853
"""
try:
from scipy import signal
except ImportError:
raise ImportError("scipy must be installed to use lombscargle_scipy")
t, y = np.broadcast_arrays(t, y)
# Scipy requires floating-point input
t = np.asarray(t, dtype=float)
y = np.asarray(y, dtype=float)
frequency = np.asarray(frequency, dtype=float)
if t.ndim != 1:
        raise ValueError("t and y should be one-dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
if center_data:
y = y - y.mean()
    # Note: scipy.signal.lombscargle expects angular frequencies
p = signal.lombscargle(t, y, 2 * np.pi * frequency)
if normalization == 'psd':
pass
elif normalization == 'standard':
p *= 2 / (t.size * np.mean(y ** 2))
elif normalization == 'log':
p = -np.log(1 - 2 * p / (t.size * np.mean(y ** 2)))
elif normalization == 'model':
p /= 0.5 * t.size * np.mean(y ** 2) - p
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
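# --- Illustrative usage sketch (editor addition, not part of astropy) --------
# A hedged example of calling ``lombscargle_scipy`` (defined above) on a noisy
# sinusoid.  It requires scipy; with the 'standard' normalization the returned
# power is dimensionless and peaks near the injected frequency of 0.137.
def _demo_lombscargle_scipy():
    import numpy as np
    rng = np.random.default_rng(0)
    t = 100 * rng.random(150)
    y = np.sin(2 * np.pi * 0.137 * t) + 0.3 * rng.standard_normal(t.size)
    frequency = np.linspace(0.01, 0.5, 1000)
    power = lombscargle_scipy(t, y, frequency, normalization='standard')
    return frequency[np.argmax(power)]            # expected to be close to 0.137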
|
9cbffadcc8a18f20e920f5f8c2f8a7bc3cfaf5fb4c53b5a8f7dd7a9fbbd1e5a7 | from math import factorial
import numpy as np
def bitceil(N):
"""
Find the bit (i.e. power of 2) immediately greater than or equal to N
Note: this works for numbers up to 2 ** 64.
Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
"""
return 1 << int(N - 1).bit_length()
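# Illustrative check (editor addition, not part of astropy): ``bitceil`` rounds
# up to the next power of two, which sets the FFT size used in ``trig_sum``.
def _demo_bitceil():
    assert bitceil(1) == 1
    assert bitceil(5) == 8
    assert bitceil(8) == 8
    assert bitceil(9) == 16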
def extirpolate(x, y, N=None, M=4):
"""
Extirpolate the values (x, y) onto an integer grid range(N),
using lagrange polynomial weights on the M nearest points.
Parameters
----------
x : array-like
array of abscissas
y : array-like
array of ordinates
N : int
number of integer bins to use. For best performance, N should be larger
than the maximum of x
M : int
number of adjoining points on which to extirpolate.
Returns
-------
yN : ndarray
N extirpolated values associated with range(N)
Examples
--------
>>> rng = np.random.default_rng(0)
>>> x = 100 * rng.random(20)
>>> y = np.sin(x)
>>> y_hat = extirpolate(x, y)
>>> x_hat = np.arange(len(y_hat))
>>> f = lambda x: np.sin(x / 10)
>>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
True
Notes
-----
This code is based on the C implementation of spread() presented in
Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
"""
x, y = map(np.ravel, np.broadcast_arrays(x, y))
if N is None:
N = int(np.max(x) + 0.5 * M + 1)
    # Now use Lagrange polynomial weights to populate the results array;
# This is an efficient recursive implementation (See Press et al. 1989)
result = np.zeros(N, dtype=y.dtype)
# first take care of the easy cases where x is an integer
integers = (x % 1 == 0)
np.add.at(result, x[integers].astype(int), y[integers])
x, y = x[~integers], y[~integers]
# For each remaining x, find the index describing the extirpolation range.
# i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
# adjusted so that the limits are within the range 0...N
ilo = np.clip((x - M // 2).astype(int), 0, N - M)
numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
denominator = factorial(M - 1)
for j in range(M):
if j > 0:
denominator *= j / (j - M)
ind = ilo + (M - 1 - j)
np.add.at(result, ind, numerator / (denominator * (x - ind)))
return result
def trig_sum(t, h, df, N, f0=0, freq_factor=1,
oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies
This routine computes weighted sine and cosine sums::
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
    Where f_j = freq_factor * (f0 + j * df) for the values j in range(N).
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array-like
array of input times
h : array-like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float, optional
The low frequency to use
freq_factor : float, optional
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
        oversampling factor for the approximation; roughly the number of
        time samples across the highest-frequency sinusoid. This parameter
        controls the trade-off between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarray
        summation arrays for frequencies f = freq_factor * (f0 + df * np.arange(N))
"""
df *= freq_factor
f0 *= freq_factor
if df <= 0:
raise ValueError("df must be positive")
t, h = map(np.ravel, np.broadcast_arrays(t, h))
if use_fft:
Mfft = int(Mfft)
if Mfft <= 0:
raise ValueError("Mfft must be positive")
# required size of fft is the power of 2 above the oversampling rate
Nfft = bitceil(N * oversampling)
t0 = t.min()
if f0 > 0:
h = h * np.exp(2j * np.pi * f0 * (t - t0))
tnorm = ((t - t0) * Nfft * df) % Nfft
grid = extirpolate(tnorm, h, Nfft, Mfft)
fftgrid = np.fft.ifft(grid)[:N]
if t0 != 0:
f = f0 + df * np.arange(N)
fftgrid *= np.exp(2j * np.pi * t0 * f)
C = Nfft * fftgrid.real
S = Nfft * fftgrid.imag
else:
f = f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
return S, C
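# --- Illustrative accuracy check (editor addition, not part of astropy) ------
# A hedged sketch comparing the FFT-based approximation in ``trig_sum`` with
# the exact O[N^2] evaluation on a small problem.  With the default
# oversampling and Mfft the two should agree closely; the maximum absolute
# differences are returned so the agreement can be inspected directly.
def _demo_trig_sum_accuracy():
    import numpy as np
    rng = np.random.default_rng(1)
    t = np.sort(10 * rng.random(50))
    h = rng.standard_normal(50)
    kwargs = dict(df=0.01, N=200, f0=0.1)
    S_fft, C_fft = trig_sum(t, h, use_fft=True, **kwargs)
    S_exact, C_exact = trig_sum(t, h, use_fft=False, **kwargs)
    return (np.max(np.abs(S_fft - S_exact)), np.max(np.abs(C_fft - C_exact)))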
|
1f001177aa72c31dae3684d1e73fdb2eed34ef95b570a8672bea1b38c84bf9e2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
from collections import defaultdict
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.table import QTable, Row
from .mapping import from_mapping
def from_row(row, *, move_to_meta=False, cosmology=None):
"""Instantiate a `~astropy.cosmology.Cosmology` from a `~astropy.table.Row`.
Parameters
----------
row : `~astropy.table.Row`
The object containing the Cosmology information.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
        Cosmology does NOT have a variable keyword argument (i.e. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'table'.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a Row with
``from_row``, we will first make a `~astropy.table.Row` using
:func:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> cr = Planck18.to_format("astropy.row")
>>> cr
<Row index=0>
cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Now this row can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(cr, format="astropy.row")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
"""
# special values
name = row['name'] if 'name' in row.columns else None # get name from column
meta = defaultdict(dict, copy.deepcopy(row.meta))
# Now need to add the Columnar metadata. This is only available on the
# parent table. If Row is ever separated from Table, this should be moved
# to ``to_table``.
for col in row._table.itercols():
if col.info.meta: # Only add metadata if not empty
meta[col.name].update(col.info.meta)
# turn row into mapping, filling cosmo if not in a column
mapping = dict(row)
mapping["name"] = name
mapping.setdefault("cosmology", meta.pop("cosmology", None))
mapping["meta"] = dict(meta)
# build cosmology from map
return from_mapping(mapping, move_to_meta=move_to_meta, cosmology=cosmology)
def to_row(cosmology, *args, cosmology_in_meta=False, table_cls=QTable):
"""Serialize the cosmology into a `~astropy.table.Row`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
table_cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` class or subclass type to use.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool
Whether to put the cosmology class in the Table metadata (if `True`) or
as the first column (if `False`, default).
Returns
-------
`~astropy.table.Row`
With columns for the cosmology parameters, and metadata in the Table's
``meta`` attribute. The cosmology class name will either be a column
or in ``meta``, depending on 'cosmology_in_meta'.
Examples
--------
A Cosmology as a `~astropy.table.Row` will have the cosmology's name and
parameters as columns.
>>> from astropy.cosmology import Planck18
>>> cr = Planck18.to_format("astropy.row")
>>> cr
<Row index=0>
cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
The cosmological class and other metadata, e.g. a paper reference, are in
the Table's metadata.
"""
from .table import to_table
table = to_table(cosmology, cls=table_cls, cosmology_in_meta=cosmology_in_meta)
return table[0] # extract row from table
def row_identify(origin, format, *args, **kwargs):
"""Identify if object uses the `~astropy.table.Row` format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Row) and (format in (None, "astropy.row"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.row", Cosmology, from_row)
convert_registry.register_writer("astropy.row", Cosmology, to_row)
convert_registry.register_identifier("astropy.row", Cosmology, row_identify)
|
3873e7280eb12cf6b2a2e7a44496caff33fbb8b9714f9ab22196991f571f3837 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.table import Column, QTable, Table
from .mapping import to_mapping
from .row import from_row
from .utils import convert_parameter_to_column
def from_table(table, index=None, *, move_to_meta=False, cosmology=None):
"""Instantiate a `~astropy.cosmology.Cosmology` from a |QTable|.
Parameters
----------
table : `~astropy.table.Table`
The object to parse into a |Cosmology|.
index : int, str, or None, optional
Needed to select the row in tables with multiple rows. ``index`` can be
an integer for the row number or, if the table is indexed by a column,
the value of that column. If the table is not indexed and ``index``
is a string, the "name" column is used as the indexing column.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
        Cosmology does NOT have a variable keyword argument (i.e. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'table'.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a Table with
``from_table``, we will first make a |QTable| using
:func:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> ct = Planck18.to_format("astropy.table")
>>> ct
<QTable length=1>
name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- -------
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Now this table can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(ct, format="astropy.table")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
Specific cosmology classes can be used to parse the data. The class'
default parameter values are used to fill in any information missing in the
data.
>>> from astropy.cosmology import FlatLambdaCDM
>>> del ct["Tcmb0"] # show FlatLambdaCDM provides default
>>> FlatLambdaCDM.from_format(ct)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897)
For tables with multiple rows of cosmological parameters, the ``index``
argument is needed to select the correct row. The index can be an integer
for the row number or, if the table is indexed by a column, the value of
that column. If the table is not indexed and ``index`` is a string, the
"name" column is used as the indexing column.
Here is an example where ``index`` is needed and can be either an integer
(for the row number) or the name of one of the cosmologies, e.g. 'Planck15'.
>>> from astropy.cosmology import Planck13, Planck15, Planck18
>>> from astropy.table import vstack
>>> cts = vstack([c.to_format("astropy.table")
... for c in (Planck13, Planck15, Planck18)],
... metadata_conflicts='silent')
>>> cts
<QTable length=3>
name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- --------
Planck13 67.77 0.30712 2.7255 3.046 0.0 .. 0.06 0.048252
Planck15 67.74 0.3075 2.7255 3.046 0.0 .. 0.06 0.0486
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
>>> cosmo = Cosmology.from_format(cts, index=1, format="astropy.table")
>>> cosmo == Planck15
True
For further examples, see :doc:`astropy:cosmology/io`.
"""
# Get row from table
# string index uses the indexed column on the table to find the row index.
if isinstance(index, str):
if not table.indices: # no indexing column, find by string match
indices = np.where(table['name'] == index)[0]
else: # has indexing column
indices = table.loc_indices[index] # need to convert to row index (int)
if isinstance(indices, (int, np.integer)): # loc_indices
index = indices
elif len(indices) == 1: # only happens w/ np.where
index = indices[0]
elif len(indices) == 0: # matches from loc_indices
raise KeyError(f"No matches found for key {indices}")
else: # like the Highlander, there can be only 1 Cosmology
raise ValueError(f"more than one cosmology found for key {indices}")
# no index is needed for a 1-row table. For a multi-row table...
if index is None:
if len(table) != 1: # multi-row table and no index
raise ValueError("need to select a specific row (e.g. index=1) when "
"constructing a Cosmology from a multi-row table.")
else: # single-row table
index = 0
row = table[index] # index is now the row index (int)
# parse row to cosmo
return from_row(row, move_to_meta=move_to_meta, cosmology=cosmology)
def to_table(cosmology, *args, cls=QTable, cosmology_in_meta=True):
"""Serialize the cosmology into a `~astropy.table.QTable`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` class or subclass type to return.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool
Whether to put the cosmology class in the Table metadata (if `True`,
default) or as the first column (if `False`).
Returns
-------
`~astropy.table.QTable`
With columns for the cosmology parameters, and metadata and
cosmology class name in the Table's ``meta`` attribute
Raises
------
TypeError
If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table`
Examples
--------
A Cosmology as a `~astropy.table.QTable` will have the cosmology's name and
parameters as columns.
>>> from astropy.cosmology import Planck18
>>> ct = Planck18.to_format("astropy.table")
>>> ct
<QTable length=1>
name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64[3] float64
-------- ------------ ------- ------- ------- ----------- -------
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
The cosmological class and other metadata, e.g. a paper reference, are in
the Table's metadata.
>>> ct.meta
OrderedDict([..., ('cosmology', 'FlatLambdaCDM')])
    To move the cosmology class from the metadata to a Table column, set the
``cosmology_in_meta`` argument to `False`:
>>> Planck18.to_format("astropy.table", cosmology_in_meta=False)
<QTable length=1>
cosmology name H0 Om0 Tcmb0 Neff m_nu Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64[3] float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Astropy recommends `~astropy.table.QTable` for tables with
`~astropy.units.Quantity` columns. However the returned type may be
overridden using the ``cls`` argument:
>>> from astropy.table import Table
>>> Planck18.to_format("astropy.table", cls=Table)
<Table length=1>
...
"""
if not issubclass(cls, Table):
raise TypeError(f"'cls' must be a (sub)class of Table, not {type(cls)}")
# Start by getting a map representation.
data = to_mapping(cosmology)
data["cosmology"] = data["cosmology"].__qualname__ # change to str
# Metadata
meta = data.pop("meta") # remove the meta
if cosmology_in_meta:
meta["cosmology"] = data.pop("cosmology")
# Need to turn everything into something Table can process:
# - Column for Parameter
# - list for anything else
cosmo_cls = cosmology.__class__
for k, v in data.items():
if k in cosmology.__parameters__:
col = convert_parameter_to_column(getattr(cosmo_cls, k), v,
cosmology.meta.get(k))
else:
col = Column([v])
data[k] = col
tbl = cls(data, meta=meta)
tbl.add_index("name", unique=True)
return tbl
def table_identify(origin, format, *args, **kwargs):
"""Identify if object uses the Table format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Table) and (format in (None, "astropy.table"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.table", Cosmology, from_table)
convert_registry.register_writer("astropy.table", Cosmology, to_table)
convert_registry.register_identifier("astropy.table", Cosmology, table_identify)
|
86c43aad8a06dbdc54a524e3a3a33da1ffa677f0383123b6d656105879a1b92f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Read/Write/Interchange methods for `astropy.cosmology`. **NOT public API**.
"""
# Import to register with the I/O machinery
from . import cosmology, ecsv, mapping, model, row, table, yaml # noqa: F401, F403
|
02b2eb6fa25cf379e5ecc925a6ca4b3113845557b8f3f9a08ed5caa7beb2f593 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.modeling import Parameter as ModelParameter
from astropy.table import Column
FULLQUALNAME_SUBSTITUTIONS = {
"astropy.cosmology.flrw.base.FLRW": "astropy.cosmology.flrw.FLRW",
"astropy.cosmology.flrw.lambdacdm.LambdaCDM": "astropy.cosmology.flrw.LambdaCDM",
"astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM": "astropy.cosmology.flrw.FlatLambdaCDM",
"astropy.cosmology.flrw.w0wacdm.w0waCDM": "astropy.cosmology.flrw.w0waCDM",
"astropy.cosmology.flrw.w0wacdm.Flatw0waCDM": "astropy.cosmology.flrw.Flatw0waCDM",
"astropy.cosmology.flrw.w0wzcdm.w0wzCDM": "astropy.cosmology.flrw.w0wzCDM",
"astropy.cosmology.flrw.w0cdm.wCDM": "astropy.cosmology.flrw.wCDM",
"astropy.cosmology.flrw.w0cdm.FlatwCDM": "astropy.cosmology.flrw.FlatwCDM",
"astropy.cosmology.flrw.wpwazpcdm.wpwaCDM": "astropy.cosmology.flrw.wpwaCDM",
}
"""Substitutions mapping the actual qualified name to its preferred value."""
def convert_parameter_to_column(parameter, value, meta=None):
"""Convert a |Cosmology| Parameter to a Table |Column|.
Parameters
----------
parameter : `astropy.cosmology.parameter.Parameter`
value : Any
meta : dict or None, optional
Information from the Cosmology's metadata.
Returns
-------
`astropy.table.Column`
"""
shape = (1,) + np.shape(value) # minimum of 1d
col = Column(data=np.reshape(value, shape),
name=parameter.name,
dtype=None, # inferred from the data
description=parameter.__doc__,
format=None,
meta=meta)
return col
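# --- Illustrative sketch (editor addition, not part of astropy) --------------
# ``convert_parameter_to_column`` only needs the Parameter's ``name`` and
# docstring, so a hypothetical stand-in class is enough to show how a scalar
# value becomes a length-1 Column carrying the description and metadata.
def _demo_convert_parameter_to_column():
    class FakeParameter:                          # hypothetical stand-in
        """Hubble constant [km / (Mpc s)]."""
        name = "H0"
    col = convert_parameter_to_column(FakeParameter(), 67.66,
                                      meta={"source": "demo"})
    return col.name, col.shape                    # ("H0", (1,))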
def convert_parameter_to_model_parameter(parameter, value, meta=None):
"""Convert a Cosmology Parameter to a Model Parameter.
Parameters
----------
parameter : `astropy.cosmology.parameter.Parameter`
value : Any
meta : dict or None, optional
Information from the Cosmology's metadata.
This function will use any of: 'getter', 'setter', 'fixed', 'tied',
'min', 'max', 'bounds', 'prior', 'posterior'.
Returns
-------
`astropy.modeling.Parameter`
"""
    # Get from meta the information relevant to the Model
extra = {k: v for k, v in (meta or {}).items()
if k in ('getter', 'setter', 'fixed', 'tied', 'min', 'max',
'bounds', 'prior', 'posterior')}
return ModelParameter(description=parameter.__doc__,
default=value,
unit=getattr(value, "unit", None),
**extra)
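# --- Illustrative sketch (editor addition, not part of astropy) --------------
# ``convert_parameter_to_model_parameter`` keeps only the metadata keys that
# `astropy.modeling.Parameter` understands; everything else is silently
# dropped.  The stand-in parameter below is hypothetical, for illustration.
def _demo_convert_parameter_to_model_parameter():
    class FakeParameter:                          # hypothetical stand-in
        """Matter density parameter."""
        name = "Om0"
    meta = {"fixed": True, "reference": "dropped: not a Model Parameter key"}
    model_param = convert_parameter_to_model_parameter(FakeParameter(), 0.3, meta)
    return model_param.fixed, model_param.default  # (True, 0.3)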
|
350729c8e8eeb10899f95c053889abb7002cb06158c05958ee6b48576e3fef57 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import inf
import numpy as np
import pytest
from astropy.cosmology.utils import aszarr, inf_like, vectorize_if_needed, vectorize_redshift_method
from astropy.utils.exceptions import AstropyDeprecationWarning
from .test_core import _zarr, invalid_zs, valid_zs
def test_vectorize_redshift_method():
"""Test :func:`astropy.cosmology.utils.vectorize_redshift_method`."""
class Class:
@vectorize_redshift_method
def method(self, z):
return z
c = Class()
assert hasattr(c.method, "__vectorized__")
assert isinstance(c.method.__vectorized__, np.vectorize)
# calling with Number
assert c.method(1) == 1
assert isinstance(c.method(1), int)
# calling with a numpy scalar
assert c.method(np.float64(1)) == np.float64(1)
assert isinstance(c.method(np.float64(1)), np.float64)
# numpy array
assert all(c.method(np.array([1, 2])) == np.array([1, 2]))
assert isinstance(c.method(np.array([1, 2])), np.ndarray)
# non-scalar
assert all(c.method([1, 2]) == np.array([1, 2]))
assert isinstance(c.method([1, 2]), np.ndarray)
def test_vectorize_if_needed():
"""
Test :func:`astropy.cosmology.utils.vectorize_if_needed`.
    There's no need to test 'veckw' because that is directly passed to
`numpy.vectorize` which thoroughly tests the various inputs.
"""
def func(x):
return x ** 2
with pytest.warns(AstropyDeprecationWarning):
# not vectorized
assert vectorize_if_needed(func, 2) == 4
# vectorized
assert all(vectorize_if_needed(func, [2, 3]) == [4, 9])
@pytest.mark.parametrize("arr, expected",
[(0.0, inf), # float scalar
(1, inf), # integer scalar should give float output
([0.0, 1.0, 2.0, 3.0], (inf, inf, inf, inf)),
([0, 1, 2, 3], (inf, inf, inf, inf)), # integer list
])
def test_inf_like(arr, expected):
"""
Test :func:`astropy.cosmology.utils.inf_like`.
All inputs should give a float output.
These tests are also in the docstring, but it's better to have them also
in one consolidated location.
"""
with pytest.warns(AstropyDeprecationWarning):
assert np.all(inf_like(arr) == expected)
# -------------------------------------------------------------------
class Test_aszarr:
@pytest.mark.parametrize("z, expect", list(zip(valid_zs, [
0, 1, 1100, np.float64(3300), 2.0, 3.0, _zarr, _zarr, _zarr, _zarr
])))
def test_valid(self, z, expect):
"""Test :func:`astropy.cosmology.utils.aszarr`."""
got = aszarr(z)
assert np.array_equal(got, expect)
@pytest.mark.parametrize("z, exc", invalid_zs)
def test_invalid(self, z, exc):
"""Test :func:`astropy.cosmology.utils.aszarr`."""
with pytest.raises(exc):
aszarr(z)
|
ed4cb31cce82d2ad4ca59b9e2869fbd7775e69bb4b0d8466713d7c1cea4751f5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Configure the tests for :mod:`astropy.cosmology`."""
from astropy.cosmology.tests.helper import clean_registry # noqa: F401, F403
from astropy.tests.helper import pickle_protocol # noqa: F401, F403
|
0a1d44a8559cd06dde22165736d686255dbd412094603e4ddf06fc7b4a5b52bd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.parameter`."""
##############################################################################
# IMPORTS
# STDLIB
import inspect
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter, _validate_to_float, _validate_with_unit
from astropy.utils.exceptions import AstropyDeprecationWarning
##############################################################################
# TESTS
##############################################################################
class ParameterTestMixin:
"""Tests for a :class:`astropy.cosmology.Parameter` on a Cosmology.
:class:`astropy.cosmology.Parameter` is a descriptor and this test suite
tests descriptors by class inheritance, so ``ParameterTestMixin`` is mixed
into ``TestCosmology`` (tests :class:`astropy.cosmology.Cosmology`).
"""
@pytest.fixture
def parameter(self, cosmo_cls):
"""Cosmological Parameters"""
# I wish this would work
# yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__parameters__}
# just return one parameter at random
yield getattr(cosmo_cls, set(cosmo_cls.__parameters__).pop())
@pytest.fixture
def all_parameter(self, cosmo_cls):
"""Cosmological All Parameter instances"""
# I wish this would work
# yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__all_parameters__}
# just return one parameter at random
yield getattr(cosmo_cls, set(cosmo_cls.__all_parameters__).pop())
# ===============================================================
# Method Tests
def test_Parameter_class_attributes(self, all_parameter):
"""Test :class:`astropy.cosmology.Parameter` attributes on class."""
# _registry_validators
assert hasattr(all_parameter, "_registry_validators")
assert isinstance(all_parameter._registry_validators, dict)
assert all(isinstance(k, str) for k in all_parameter._registry_validators.keys())
assert all(callable(v) for v in all_parameter._registry_validators.values())
def test_Parameter_init(self):
"""Test :class:`astropy.cosmology.Parameter` instantiation."""
# defaults
parameter = Parameter()
assert parameter.fvalidate is _validate_with_unit
assert parameter.unit is None
assert parameter.equivalencies == []
assert parameter.derived is False
assert parameter.name is None
# setting all kwargs
parameter = Parameter(fvalidate="float", doc="DOCSTRING", unit="km",
equivalencies=[u.mass_energy()], derived=True)
assert parameter.fvalidate is _validate_to_float
assert parameter.unit is u.km
assert parameter.equivalencies == [u.mass_energy()]
assert parameter.derived is True
def test_Parameter_init_deprecated_fmt(self):
"""Test that passing the argument ``fmt`` is deprecated."""
with pytest.warns(AstropyDeprecationWarning):
parameter = Parameter(fmt=".4f")
assert parameter._format_spec == ".4f"
# Test that it appears in initializing arguments
init_args = parameter._get_init_arguments()
assert init_args["fmt"] == ".4f"
def test_Parameter_instance_attributes(self, all_parameter):
"""Test :class:`astropy.cosmology.Parameter` attributes from init."""
assert hasattr(all_parameter, "fvalidate")
assert callable(all_parameter.fvalidate)
assert hasattr(all_parameter, "__doc__")
# Parameter
assert hasattr(all_parameter, "_unit")
assert hasattr(all_parameter, "_equivalencies")
assert hasattr(all_parameter, "_derived")
assert hasattr(all_parameter, "_format_spec")
# __set_name__
assert hasattr(all_parameter, "_attr_name")
assert hasattr(all_parameter, "_attr_name_private")
def test_Parameter_fvalidate(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
assert hasattr(all_parameter, "fvalidate")
assert callable(all_parameter.fvalidate)
def test_Parameter_name(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.name`."""
assert hasattr(all_parameter, "name")
assert isinstance(all_parameter.name, str)
assert all_parameter.name is all_parameter._attr_name
def test_Parameter_unit(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.unit`."""
assert hasattr(all_parameter, "unit")
assert isinstance(all_parameter.unit, (u.UnitBase, type(None)))
assert all_parameter.unit is all_parameter._unit
def test_Parameter_equivalencies(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
assert hasattr(all_parameter, "equivalencies")
assert isinstance(all_parameter.equivalencies, (list, u.Equivalency))
assert all_parameter.equivalencies is all_parameter._equivalencies
def test_Parameter_format_spec(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.format_spec`."""
with pytest.warns(AstropyDeprecationWarning):
fmt = all_parameter.format_spec
assert isinstance(fmt, str)
assert fmt is all_parameter._format_spec
def test_Parameter_derived(self, cosmo_cls, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.derived`."""
assert hasattr(all_parameter, "derived")
assert isinstance(all_parameter.derived, bool)
assert all_parameter.derived is all_parameter._derived
# test value
if all_parameter.name in cosmo_cls.__parameters__:
assert all_parameter.derived is False
else:
assert all_parameter.derived is True
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.__get__`."""
# from class
parameter = getattr(cosmo_cls, all_parameter.name)
assert isinstance(parameter, Parameter)
assert parameter is all_parameter
# from instance
parameter = getattr(cosmo, all_parameter.name)
assert np.all(parameter == getattr(cosmo, all_parameter._attr_name_private))
def test_Parameter_descriptor_set(self, cosmo, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.__set__`."""
# test it's already set
assert hasattr(cosmo, all_parameter._attr_name_private)
# and raises an error if set again
with pytest.raises(AttributeError, match="can't set attribute"):
setattr(cosmo, all_parameter._attr_name, None)
# -------------------------------------------
# validate value
# tested later.
# ===============================================================
# Usage Tests
def test_Parameter_listed(self, cosmo_cls, all_parameter):
"""Test each `astropy.cosmology.Parameter` attached to Cosmology."""
# just double check that each entry is a Parameter
assert isinstance(all_parameter, Parameter)
# the reverse: check that if it is a Parameter, it's listed.
# note have to check the more inclusive ``__all_parameters__``
assert all_parameter.name in cosmo_cls.__all_parameters__
if not all_parameter.derived:
assert all_parameter.name in cosmo_cls.__parameters__
def test_parameter_related_attributes_on_Cosmology(self, cosmo_cls):
"""Test `astropy.cosmology.Parameter`-related on Cosmology."""
# establish has expected attribute
assert hasattr(cosmo_cls, "__parameters__")
assert hasattr(cosmo_cls, "__all_parameters__")
def test_Parameter_not_unique(self, cosmo_cls, clean_registry):
"""Cosmology Parameter not unique to class when subclass defined."""
# define subclass to show param is same
class ExampleBase(cosmo_cls):
param = Parameter()
class Example(ExampleBase):
pass
assert Example.param is ExampleBase.param
assert Example.__parameters__ == ExampleBase.__parameters__
def test_Parameters_reorder_by_signature(self, cosmo_cls, clean_registry):
"""Test parameters are reordered."""
class Example(cosmo_cls):
param = Parameter()
def __init__(self, param, *, name=None, meta=None):
pass # never actually initialized
# param should be 1st, all other parameters next
        assert Example.__parameters__[0] == "param"
# Check the other parameters are as expected.
# only run this test if "param" is not already on the cosmology
if cosmo_cls.__parameters__[0] != "param":
assert set(Example.__parameters__[1:]) == set(cosmo_cls.__parameters__)
def test_make_from_Parameter(self, cosmo_cls, clean_registry):
"""Test the parameter creation process. Uses ``__set__``."""
class Example(cosmo_cls):
param = Parameter(unit=u.eV, equivalencies=u.mass_energy())
def __init__(self, param, *, name=None, meta=None):
self.param = param
@property
def is_flat(self):
return super().is_flat()
assert Example(1).param == 1 * u.eV
assert Example(1 * u.eV).param == 1 * u.eV
assert Example(1 * u.J).param == (1 * u.J).to(u.eV)
assert Example(1 * u.kg).param == (1 * u.kg).to(u.eV, u.mass_energy())
# ========================================================================
class TestParameter(ParameterTestMixin):
"""
Test `astropy.cosmology.Parameter` directly. Adds a lot of specific tests
that wouldn't be covered by the per-cosmology tests.
"""
def setup_class(self):
class Example1(Cosmology):
param = Parameter(doc="Description of example parameter.",
unit=u.m, equivalencies=u.mass_energy())
def __init__(self, param=15):
self.param = param
@property
def is_flat(self):
return super().is_flat()
# with validator
class Example2(Example1):
def __init__(self, param=15 * u.m):
self.param = param
@Example1.param.validator
def param(self, param, value):
return value.to(u.km)
# attributes
self.classes = {"Example1": Example1, "Example2": Example2}
def teardown_class(self):
for cls in self.classes.values():
_COSMOLOGY_CLASSES.pop(cls.__qualname__)
@pytest.fixture(scope="class", params=["Example1", "Example2"])
def cosmo_cls(self, request):
"""Cosmology class."""
return self.classes[request.param]
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
"""Cosmology instance"""
return cosmo_cls()
@pytest.fixture(scope="class")
def param(self, cosmo_cls):
"""Get Parameter 'param' from cosmology class."""
return cosmo_cls.param
# ==============================================================
def test_Parameter_instance_attributes(self, param):
"""Test :class:`astropy.cosmology.Parameter` attributes from init."""
super().test_Parameter_instance_attributes(param)
# property
assert param.__doc__ == "Description of example parameter."
# custom from init
assert param._unit == u.m
assert param._equivalencies == u.mass_energy()
assert param._format_spec == ""
assert param._derived == np.False_
# custom from set_name
assert param._attr_name == "param"
assert param._attr_name_private == "_param"
def test_Parameter_fvalidate(self, cosmo, param):
"""Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
super().test_Parameter_fvalidate(param)
value = param.fvalidate(cosmo, param, 1000 * u.m)
assert value == 1 * u.km
def test_Parameter_name(self, param):
"""Test :attr:`astropy.cosmology.Parameter.name`."""
super().test_Parameter_name(param)
assert param.name == "param"
def test_Parameter_unit(self, param):
"""Test :attr:`astropy.cosmology.Parameter.unit`."""
super().test_Parameter_unit(param)
assert param.unit == u.m
def test_Parameter_equivalencies(self, param):
"""Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
super().test_Parameter_equivalencies(param)
assert param.equivalencies == u.mass_energy()
def test_Parameter_format_spec(self, param):
"""Test :attr:`astropy.cosmology.Parameter.format_spec`."""
super().test_Parameter_format_spec(param)
with pytest.warns(AstropyDeprecationWarning):
assert param.format_spec == ""
def test_Parameter_derived(self, cosmo_cls, param):
"""Test :attr:`astropy.cosmology.Parameter.derived`."""
super().test_Parameter_derived(cosmo_cls, param)
assert param.derived is False
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, param):
"""Test :meth:`astropy.cosmology.Parameter.__get__`."""
super().test_Parameter_descriptor_get(cosmo_cls, cosmo, param)
# from instance
value = getattr(cosmo, param.name)
assert value == 15 * u.m
# -------------------------------------------
# validation
def test_Parameter_validator(self, param):
"""Test :meth:`astropy.cosmology.Parameter.validator`."""
for k in Parameter._registry_validators:
newparam = param.validator(k)
assert newparam.fvalidate == newparam._registry_validators[k]
# error for non-registered str
with pytest.raises(ValueError, match="`fvalidate`, if str"):
Parameter(fvalidate="NOT REGISTERED")
# error if wrong type
with pytest.raises(TypeError, match="`fvalidate` must be a function or"):
Parameter(fvalidate=object())
def test_Parameter_validate(self, cosmo, param):
"""Test :meth:`astropy.cosmology.Parameter.validate`."""
value = param.validate(cosmo, 1000 * u.m)
# whether has custom validator
if param.fvalidate is param._registry_validators["default"]:
assert value.unit == u.m
assert value.value == 1000
else:
assert value.unit == u.km
assert value.value == 1
def test_Parameter_register_validator(self, param):
"""Test :meth:`astropy.cosmology.Parameter.register_validator`."""
# already registered
with pytest.raises(KeyError, match="validator 'default' already"):
param.__class__.register_validator("default", None)
# validator not None
def notnonefunc(x):
return x
try:
validator = param.__class__.register_validator("newvalidator", notnonefunc)
assert validator is notnonefunc
finally:
param.__class__._registry_validators.pop("newvalidator", None)
# used as decorator
try:
@param.__class__.register_validator("newvalidator")
def func(cosmology, param, value):
return value
assert param.__class__._registry_validators["newvalidator"] is func
finally:
param.__class__._registry_validators.pop("newvalidator", None)
# -------------------------------------------
def test_Parameter_clone(self, param):
"""Test :meth:`astropy.cosmology.Parameter.clone`."""
# this implicitly relies on `__eq__` testing properly. Which is tested.
# basic test that nothing changes
assert param.clone() == param
assert param.clone() is not param # but it's not a 'singleton'
# passing kwargs will change stuff
newparam = param.clone(unit="km/(yr sr)")
assert newparam.unit == u.km / u.yr / u.sr
assert param.unit != u.km / u.yr / u.sr # original is unchanged
# expected failure for not-an-argument
with pytest.raises(TypeError):
param.clone(not_a_valid_parameter=True)
# -------------------------------------------
def test_Parameter_equality(self):
"""
Test Parameter equality.
Determined from the processed initialization args (including defaults).
"""
p1 = Parameter(unit="km / (s Mpc)")
p2 = Parameter(unit="km / (s Mpc)")
assert p1 == p2
# not equal parameters
p3 = Parameter(unit="km / s")
assert p3 != p1
# misc
assert p1 != 2 # show doesn't error
# -------------------------------------------
def test_Parameter_repr(self, cosmo_cls, param):
"""Test Parameter repr."""
r = repr(param)
assert "Parameter(" in r
for subs in ("derived=False", 'unit=Unit("m")', 'equivalencies=[(Unit("kg"), Unit("J")',
"doc='Description of example parameter.'"):
assert subs in r, subs
        # `fvalidate` is a little trickier b/c one of them is custom!
if param.fvalidate in param._registry_validators.values(): # not custom
assert "fvalidate='default'" in r
else:
assert "fvalidate=<" in r # Some function, don't care about details.
def test_Parameter_repr_roundtrip(self, param):
"""Test ``eval(repr(Parameter))`` can round trip to ``Parameter``."""
P = Parameter(doc="A description of this parameter.", derived=True)
NP = eval(repr(P)) # Evaluate string representation back into a param.
assert P == NP
# ==============================================================
def test_Parameter_doesnt_change_with_generic_class(self):
"""Descriptors are initialized once and not updated on subclasses."""
class ExampleBase:
def __init__(self, param=15):
self._param = param
sig = inspect.signature(__init__)
_init_signature = sig.replace(parameters=list(sig.parameters.values())[1:])
param = Parameter(doc="example parameter")
class Example(ExampleBase):
pass
assert Example.param is ExampleBase.param
def test_Parameter_doesnt_change_with_cosmology(self, cosmo_cls):
"""Cosmology reinitializes all descriptors when a subclass is defined."""
# define subclass to show param is same
class Example(cosmo_cls):
pass
assert Example.param is cosmo_cls.param
# unregister
_COSMOLOGY_CLASSES.pop(Example.__qualname__)
assert Example.__qualname__ not in _COSMOLOGY_CLASSES
|
4b2271edb5fa6b3e8aaa6d648154b0db6a704e87cdf58e560e06ce3d98150c95 | # -*- coding: utf-8 -*-
"""Testing :mod:`astropy.cosmology.units`."""
##############################################################################
# IMPORTS
import pytest
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Planck13, default_cosmology
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_ASDF, HAS_SCIPY
from astropy.utils.exceptions import AstropyDeprecationWarning
##############################################################################
# TESTS
##############################################################################
def test_has_expected_units():
"""
Test that this module has the expected set of units. Some of the units are
imported from :mod:`astropy.units`, or vice versa. Here we test presence,
not usage. Units from :mod:`astropy.units` are tested in that module. Units
defined in :mod:`astropy.cosmology` will be tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`littleh`"):
assert u.astrophys.littleh is cu.littleh
def test_has_expected_equivalencies():
"""
Test that this module has the expected set of equivalencies. Many of the
equivalencies are imported from :mod:`astropy.units`, so here we test
presence, not usage. Equivalencies from :mod:`astropy.units` are tested in
that module. Equivalencies defined in :mod:`astropy.cosmology` will be
tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`with_H0`"):
assert u.equivalencies.with_H0 is cu.with_H0
def test_littleh():
"""Test :func:`astropy.cosmology.units.with_H0`."""
H0_70 = 70 * u.km / u.s / u.Mpc
h70dist = 70 * u.Mpc / cu.littleh
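# With H0 = 70 km/s/Mpc the dimensionless Hubble parameter is h = 0.7,
# so 70 Mpc/h corresponds to 70 / 0.7 = 100 Mpc.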
assert_quantity_allclose(h70dist.to(u.Mpc, cu.with_H0(H0_70)), 100 * u.Mpc)
# make sure using the default cosmology works
cosmodist = default_cosmology.get().H0.value * u.Mpc / cu.littleh
assert_quantity_allclose(cosmodist.to(u.Mpc, cu.with_H0()), 100 * u.Mpc)
# Now try a luminosity scaling
h1lum = 0.49 * u.Lsun * cu.littleh ** -2
assert_quantity_allclose(h1lum.to(u.Lsun, cu.with_H0(H0_70)), 1 * u.Lsun)
# And the trickiest one: magnitudes. Using H0=10 here for the round numbers
H0_10 = 10 * u.km / u.s / u.Mpc
# assume the "true" magnitude M = 12.
# Then M - 5*log_10(h) = M + 5 = 17
withlittlehmag = 17 * (u.mag - u.MagUnit(cu.littleh ** 2))
assert_quantity_allclose(withlittlehmag.to(u.mag, cu.with_H0(H0_10)), 12 * u.mag)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_dimensionless_redshift():
"""Test :func:`astropy.cosmology.units.dimensionless_redshift`."""
z = 3 * cu.redshift
val = 3 * u.one
# show units not equal
assert z.unit == cu.redshift
assert z.unit != u.one
# test equivalency enabled by default
assert z == val
# also test that it works for powers
assert (3 * cu.redshift ** 3) == val
# and in composite units
assert (3 * u.km / cu.redshift ** 3) == 3 * u.km
# test it also works as an equivalency
with u.set_enabled_equivalencies([]): # turn off default equivalencies
assert z.to(u.one, equivalencies=cu.dimensionless_redshift()) == val
with pytest.raises(ValueError):
z.to(u.one)
# if this fails, something is really wrong
with u.add_enabled_equivalencies(cu.dimensionless_redshift()):
assert z == val
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_temperature():
"""Test :func:`astropy.cosmology.units.redshift_temperature`."""
cosmo = Planck13.clone(Tcmb0=3 * u.K)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
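# For an FLRW cosmology Tcmb(z) = Tcmb0 * (1 + z), so here Tcmb = 3 K * 16 = 48 K.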
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.redshift_temperature(cosmo)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_temperature(cosmo, ztol=1e-10)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_hubble():
"""Test :func:`astropy.cosmology.units.redshift_hubble`."""
unit = u.km / u.s / u.Mpc
cosmo = Planck13.clone(H0=100 * unit)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
H = cosmo.H(z)
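# the littleh unit represents H / (100 km / s / Mpc), hence the division by 100 below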
h = H.to_value(u.km/u.s/u.Mpc) / 100 * cu.littleh
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_hubble()
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_hubble()
assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
assert default_cosmo.H(z) != H
# 2) Specifying the cosmology
equivalency = cu.redshift_hubble(cosmo)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_hubble(cosmo, ztol=1e-10)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H
assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # little-h
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
@pytest.mark.parametrize(
"kind",
[cu.redshift_distance.__defaults__[-1], "comoving", "lookback", "luminosity"]
)
def test_redshift_distance(kind):
"""Test :func:`astropy.cosmology.units.redshift_distance`."""
z = 15 * cu.redshift
d = getattr(Planck13, kind + "_distance")(z)
equivalency = cu.redshift_distance(cosmology=Planck13, kind=kind)
# properties of Equivalency
assert equivalency.name[0] == "redshift_distance"
assert equivalency.kwargs[0]["cosmology"] == Planck13
assert equivalency.kwargs[0]["distance"] == kind
# roundtrip
assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
assert_quantity_allclose(d.to(cu.redshift, equivalency), z)
def test_redshift_distance_wrong_kind():
"""Test :func:`astropy.cosmology.units.redshift_distance` wrong kind."""
with pytest.raises(ValueError, match="`kind`"):
cu.redshift_distance(kind=None)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
class Test_with_redshift:
"""Test `astropy.cosmology.units.with_redshift`."""
@pytest.fixture(scope="class")
def cosmo(self):
"""Test cosmology."""
return Planck13.clone(Tcmb0=3 * u.K)
# ===========================================
def test_cosmo_different(self, cosmo):
"""The default is different than the test cosmology."""
default_cosmo = default_cosmology.get()
assert default_cosmo != cosmo # shows changing default
def test_no_equivalency(self, cosmo):
"""Test the equivalency ``with_redshift`` without any enabled."""
equivalency = cu.with_redshift(distance=None, hubble=False, Tcmb=False)
assert len(equivalency) == 0
# -------------------------------------------
def test_temperature_off(self, cosmo):
"""Test ``with_redshift`` with the temperature off."""
z = 15 * cu.redshift
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(Tcmb=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'K'"):
z.to(u.K, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, Tcmb=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'K'"):
z.to(u.K, equivalency)
def test_temperature(self, cosmo):
"""Test temperature equivalency component."""
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, Tcmb=True, atzkw={"ztol": 1e-10})
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# -------------------------------------------
def test_hubble_off(self, cosmo):
"""Test ``with_redshift`` with Hubble off."""
unit = u.km / u.s / u.Mpc
z = 15 * cu.redshift
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(hubble=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'km / "):
z.to(unit, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, hubble=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'km / "):
z.to(unit, equivalency)
def test_hubble(self, cosmo):
"""Test Hubble equivalency component."""
unit = u.km/u.s/u.Mpc
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
H = cosmo.H(z)
h = H.to_value(u.km / u.s / u.Mpc) / 100 * cu.littleh
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(hubble=True)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(hubble=True)
assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
assert default_cosmo.H(z) != H
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, hubble=True)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# Test `atzkw`
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, hubble=True, atzkw={"ztol": 1e-10})
assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H
assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # h
# -------------------------------------------
def test_distance_off(self, cosmo):
"""Test ``with_redshift`` with the distance off."""
z = 15 * cu.redshift
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(distance=None)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'Mpc'"):
z.to(u.Mpc, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, distance=None)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'Mpc'"):
z.to(u.Mpc, equivalency)
def test_distance_default(self):
"""Test distance equivalency default."""
z = 15 * cu.redshift
d = default_cosmology.get().comoving_distance(z)
equivalency = cu.with_redshift()
assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
assert_quantity_allclose(d.to(cu.redshift, equivalency), z)
def test_distance_wrong_kind(self):
"""Test distance equivalency, but the wrong kind."""
with pytest.raises(ValueError, match="`kind`"):
cu.with_redshift(distance=ValueError)
@pytest.mark.parametrize("kind", ["comoving", "lookback", "luminosity"])
def test_distance(self, kind):
"""Test distance equivalency."""
cosmo = Planck13
z = 15 * cu.redshift
dist = getattr(cosmo, kind + "_distance")(z)
default_cosmo = default_cosmology.get()
assert default_cosmo != cosmo # shows changing default
# 1) without specifying the cosmology
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency), dist)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency),
getattr(default_cosmo, kind + "_distance")(z))
assert not u.allclose(getattr(default_cosmo, kind + "_distance")(z), dist)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency), dist)
assert_quantity_allclose(dist.to(cu.redshift, equivalency), z)
# Test atzkw
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, distance=kind, atzkw={"ztol": 1e-10})
assert_quantity_allclose(dist.to(cu.redshift, equivalency), z)
# FIXME! get "dimensionless_redshift", "with_redshift" to work in this
# they are not in ``astropy.units.equivalencies``, so the following fails
@pytest.mark.skipif(not HAS_ASDF, reason="requires ASDF")
@pytest.mark.parametrize("equiv", [cu.with_H0])
def test_equivalencies_asdf(tmpdir, equiv, recwarn):
from asdf.tests import helpers
tree = {"equiv": equiv()}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
# check starting with only the dimensionless_redshift equivalency.
assert len(base_registry.equivalencies) == 1
assert str(base_registry.equivalencies[0][0]) == "redshift"
|
8215065b6ddd3637368a86c2de6169387ff8e251db2b1aaed56f7927ab2d3a54 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import os
import pytest
from astropy import cosmology
from astropy.cosmology import Cosmology, w0wzCDM
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.io.tests import (
test_cosmology, test_ecsv, test_json, test_mapping, test_model, test_row, test_table, test_yaml)
from astropy.table import QTable, Row
###############################################################################
# SETUP
cosmo_instances = cosmology.realizations.available
# Collect the registered read/write formats.
readwrite_formats = {"ascii.ecsv", "json"}
# Collect all the registered to/from formats. Unfortunately this is NOT
# automatic since the output format class is not stored on the registry.
# (format, data type)
tofrom_formats = [("mapping", dict), ("yaml", str),
("astropy.cosmology", Cosmology),
("astropy.row", Row), ("astropy.table", QTable)]
###############################################################################
class ReadWriteTestMixin(test_ecsv.ReadWriteECSVTestMixin, test_json.ReadWriteJSONTestMixin):
"""
Tests for a CosmologyRead/Write on a |Cosmology|.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestReadWriteCosmology`` or ``TestCosmology`` for examples.
"""
@pytest.mark.parametrize("format", readwrite_formats)
def test_readwrite_complete_info(self, cosmo, tmpdir, format):
"""
Test writing from an instance and reading from the base class.
This requires full information.
The round-tripped metadata can be in a different order, so the
OrderedDict must be converted to a dict before testing equality.
"""
fname = str(tmpdir / f"{cosmo.name}.{format}")
cosmo.write(fname, format=format)
# Also test kwarg "overwrite"
assert os.path.exists(fname) # file exists
with pytest.raises(IOError):
cosmo.write(fname, format=format, overwrite=False)
assert os.path.exists(fname)  # overwrite the existing file
cosmo.write(fname, format=format, overwrite=True)
# Read back
got = Cosmology.read(fname, format=format)
assert got == cosmo
assert dict(got.meta) == dict(cosmo.meta)
@pytest.mark.parametrize("format", readwrite_formats)
def test_readwrite_from_subclass_complete_info(self, cosmo_cls, cosmo, tmpdir, format):
"""
Test writing from an instance and reading from that class, when there's
full information saved.
"""
fname = str(tmpdir / f"{cosmo.name}.{format}")
cosmo.write(fname, format=format)
# read with the same class that wrote.
got = cosmo_cls.read(fname, format=format)
assert got == cosmo
assert got.meta == cosmo.meta
# this should be equivalent to
got = Cosmology.read(fname, format=format, cosmology=cosmo_cls)
assert got == cosmo
assert got.meta == cosmo.meta
# and also
got = Cosmology.read(fname, format=format, cosmology=cosmo_cls.__qualname__)
assert got == cosmo
assert got.meta == cosmo.meta
class TestCosmologyReadWrite(ReadWriteTestMixin):
"""Test the classes CosmologyRead/Write."""
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
return getattr(cosmology.realizations, request.param)
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
return cosmo.__class__
# ==============================================================
@pytest.mark.parametrize("format", readwrite_formats)
def test_write_methods_have_explicit_kwarg_overwrite(self, format):
writer = readwrite_registry.get_writer(format, Cosmology)
# test in signature
sig = inspect.signature(writer)
assert "overwrite" in sig.parameters
# also in docstring
assert "overwrite : bool" in writer.__doc__
@pytest.mark.parametrize("format", readwrite_formats)
def test_readwrite_reader_class_mismatch(self, cosmo, tmpdir, format):
"""Test when the reader class doesn't match the file."""
fname = tmpdir / f"{cosmo.name}.{format}"
cosmo.write(str(fname), format=format)
# class mismatch
# when reading directly
with pytest.raises(TypeError, match="missing 1 required"):
w0wzCDM.read(fname, format=format)
with pytest.raises(TypeError, match="missing 1 required"):
Cosmology.read(fname, format=format, cosmology=w0wzCDM)
# when specifying the class
with pytest.raises(ValueError, match="`cosmology` must be either"):
w0wzCDM.read(fname, format=format, cosmology="FlatLambdaCDM")
###############################################################################
# To/From_Format Tests
class ToFromFormatTestMixin(test_cosmology.ToFromCosmologyTestMixin,
test_mapping.ToFromMappingTestMixin, test_model.ToFromModelTestMixin,
test_row.ToFromRowTestMixin, test_table.ToFromTableTestMixin,
test_yaml.ToFromYAMLTestMixin):
"""
Tests for a Cosmology[To/From]Format on a |Cosmology|.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.mark.parametrize("format, totype", tofrom_formats)
def test_tofromformat_complete_info(self, cosmo, format, totype,
xfail_if_not_registered_with_yaml):
"""Read tests happen later."""
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# test from_format
got = Cosmology.from_format(obj, format=format)
# Test autodetect, if enabled
if self.can_autodentify(format):
got2 = Cosmology.from_format(obj)
assert got2 == got # internal consistency
assert got == cosmo # external consistency
assert got.meta == cosmo.meta
@pytest.mark.parametrize("format, totype", tofrom_formats)
def test_fromformat_subclass_complete_info(self, cosmo_cls, cosmo, format, totype,
xfail_if_not_registered_with_yaml):
"""
Test transforming an instance and parsing from that class, when there's
full information available.
Partial information tests are handled in the Mixin super classes.
"""
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# read with the same class that wrote.
got = cosmo_cls.from_format(obj, format=format)
if self.can_autodentify(format):
got2 = Cosmology.from_format(obj) # and autodetect
assert got2 == got # internal consistency
assert got == cosmo # external consistency
assert got.meta == cosmo.meta
# this should be equivalent to
got = Cosmology.from_format(obj, format=format, cosmology=cosmo_cls)
assert got == cosmo
assert got.meta == cosmo.meta
# and also
got = Cosmology.from_format(obj, format=format, cosmology=cosmo_cls.__qualname__)
assert got == cosmo
assert got.meta == cosmo.meta
class TestCosmologyToFromFormat(ToFromFormatTestMixin):
"""Test Cosmology[To/From]Format classes."""
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
return getattr(cosmology.realizations, request.param)
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
return cosmo.__class__
# ==============================================================
@pytest.mark.parametrize("format_type", tofrom_formats)
def test_fromformat_class_mismatch(self, cosmo, format_type):
format, totype = format_type
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# class mismatch
with pytest.raises(TypeError):
w0wzCDM.from_format(obj, format=format)
with pytest.raises(TypeError):
Cosmology.from_format(obj, format=format, cosmology=w0wzCDM)
# when specifying the class
with pytest.raises(ValueError, match="`cosmology` must be either"):
w0wzCDM.from_format(obj, format=format, cosmology="FlatLambdaCDM")
|
7057d35c3e886aa7fe3dd282326f89e2181c0770fa0c134a54a02e13a3c2e8dd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.core`."""
##############################################################################
# IMPORTS
# STDLIB
import abc
import inspect
import pickle
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Cosmology, CosmologyError, FlatCosmologyMixin
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter
from astropy.table import Column, QTable, Table
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.metadata import MetaData
from .test_connect import ReadWriteTestMixin, ToFromFormatTestMixin
from .test_parameter import ParameterTestMixin
##############################################################################
# SETUP / TEARDOWN
scalar_zs = [
0, 1, 1100, # interesting times
# FIXME! np.inf breaks some funcs. 0 * inf is an error
np.float64(3300), # different type
2 * cu.redshift, 3 * u.one # compatible units
]
_zarr = np.linspace(0, 1e5, num=20)
array_zs = [
_zarr, # numpy
_zarr.tolist(), # pure python
Column(_zarr), # table-like
_zarr * cu.redshift # Quantity
]
valid_zs = scalar_zs + array_zs
invalid_zs = [
(None, TypeError), # wrong type
# Wrong units (the TypeError is for the Cython implementation, which can differ)
(4 * u.MeV, (u.UnitConversionError, TypeError)), # scalar
([0, 1] * u.m, (u.UnitConversionError, TypeError)), # array
]
class SubCosmology(Cosmology):
"""Defined here to be serializable."""
H0 = Parameter(unit="km/(s Mpc)")
Tcmb0 = Parameter(unit=u.K)
m_nu = Parameter(unit=u.eV)
def __init__(self, H0, Tcmb0=0*u.K, m_nu=0*u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
self.H0 = H0
self.Tcmb0 = Tcmb0
self.m_nu = m_nu
@property
def is_flat(self):
return super().is_flat()
##############################################################################
# TESTS
##############################################################################
class MetaTestMixin:
"""Tests for a :class:`astropy.utils.metadata.MetaData` on a Cosmology."""
def test_meta_on_class(self, cosmo_cls):
assert isinstance(cosmo_cls.meta, MetaData)
def test_meta_on_instance(self, cosmo):
assert isinstance(cosmo.meta, dict) # test type
# value set at initialization
assert cosmo.meta == self.cls_kwargs.get("meta", {})
def test_meta_mutable(self, cosmo):
"""The metadata is NOT immutable on a cosmology"""
key = tuple(cosmo.meta.keys())[0] # select some key
cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable
class TestCosmology(ParameterTestMixin, MetaTestMixin,
ReadWriteTestMixin, ToFromFormatTestMixin,
metaclass=abc.ABCMeta):
"""Test :class:`astropy.cosmology.Cosmology`.
Subclasses should define tests for:
- ``test_clone_change_param()``
- ``test_repr()``
"""
def setup_class(self):
"""
Setup for testing.
Cosmology should not be instantiated, so tests are done on a subclass.
"""
# make sure SubCosmology is known
_COSMOLOGY_CLASSES["SubCosmology"] = SubCosmology
self.cls = SubCosmology
self._cls_args = dict(H0=70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, m_nu=0.6 * u.eV)
self.cls_kwargs = dict(name=self.__class__.__name__, meta={"a": "b"})
def teardown_class(self):
_COSMOLOGY_CLASSES.pop("SubCosmology", None)
@property
def cls_args(self):
return tuple(self._cls_args.values())
@pytest.fixture(scope="class")
def cosmo_cls(self):
"""The Cosmology class as a :func:`pytest.fixture`."""
return self.cls
@pytest.fixture(scope="function") # ensure not cached.
def ba(self):
"""Return filled `inspect.BoundArguments` for cosmology."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return ba
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
"""The cosmology instance with which to test."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return cosmo_cls(*ba.args, **ba.kwargs)
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test creating subclasses registers classes and manages Parameters."""
class InitSubclassTest(cosmo_cls):
pass
# test parameters
assert InitSubclassTest.__parameters__ == cosmo_cls.__parameters__
# test and cleanup registry
registrant = _COSMOLOGY_CLASSES.pop(InitSubclassTest.__qualname__)
assert registrant is InitSubclassTest
def test_init_signature(self, cosmo_cls, cosmo):
"""Test class-property ``_init_signature``."""
# test presence
assert hasattr(cosmo_cls, "_init_signature")
assert hasattr(cosmo, "_init_signature")
# test internal consistency, so following tests can use either cls or instance.
assert cosmo_cls._init_signature == cosmo._init_signature
# test matches __init__, but without 'self'
sig = inspect.signature(cosmo.__init__) # (instances don't have self)
assert set(sig.parameters.keys()) == set(cosmo._init_signature.parameters.keys())
assert all(np.all(sig.parameters[k].default == p.default) for k, p in
cosmo._init_signature.parameters.items())
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
"""Test initialization."""
# Cosmology only does name and meta, but this subclass adds H0, Tcmb0 & m_nu.
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta={"m": 1})
assert cosmo.name == "test_init"
assert cosmo.meta["m"] == 1
# if meta is None, it is changed to a dict
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta=None)
assert cosmo.meta == {}
def test_name(self, cosmo):
"""Test property ``name``."""
assert cosmo.name is cosmo._name # accesses private attribute
assert cosmo.name is None or isinstance(cosmo.name, str) # type
assert cosmo.name == self.cls_kwargs["name"] # test has expected value
# immutable
with pytest.raises(AttributeError, match="can't set"):
cosmo.name = None
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``. It's an ABC."""
with pytest.raises(NotImplementedError, match="is_flat is not implemented"):
cosmo.is_flat
# ------------------------------------------------
# clone
def test_clone_identical(self, cosmo):
"""Test method ``.clone()`` if no (kw)args."""
assert cosmo.clone() is cosmo
def test_clone_name(self, cosmo):
"""Test method ``.clone()`` name argument."""
# test changing name. clone treats 'name' differently (see next test)
c = cosmo.clone(name="cloned cosmo")
assert c.name == "cloned cosmo" # changed
# show name is the only thing changed
c._name = cosmo.name # first change name back
assert c == cosmo
assert c.meta == cosmo.meta
# now change a different parameter and see how 'name' changes
c = cosmo.clone(meta={"test_clone_name": True})
assert c.name == cosmo.name + " (modified)"
def test_clone_meta(self, cosmo):
"""Test method ``.clone()`` meta argument: updates meta, doesn't clear."""
# start with no change
c = cosmo.clone(meta=None)
assert c.meta == cosmo.meta
# add something
c = cosmo.clone(meta=dict(test_clone_meta=True))
assert c.meta["test_clone_meta"] is True
c.meta.pop("test_clone_meta") # remove from meta
assert c.meta == cosmo.meta # now they match
def test_clone_change_param(self, cosmo):
"""
Test method ``.clone()`` changing a(many) Parameter(s).
Nothing here b/c no Parameters.
"""
def test_clone_fail_unexpected_arg(self, cosmo):
"""Test when ``.clone()`` gets an unexpected argument."""
with pytest.raises(TypeError, match="unexpected keyword argument"):
cosmo.clone(not_an_arg=4)
def test_clone_fail_positional_arg(self, cosmo):
with pytest.raises(TypeError, match="1 positional argument"):
cosmo.clone(None)
# ---------------------------------------------------------------
# comparison methods
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`."""
# to self
assert cosmo.is_equivalent(cosmo)
# same class, different instance
newclone = cosmo.clone(name="test_is_equivalent")
assert cosmo.is_equivalent(newclone)
assert newclone.is_equivalent(cosmo)
# different class and not convertible to Cosmology.
assert not cosmo.is_equivalent(2)
def test_equality(self, cosmo):
"""Test method ``.__eq__()."""
# wrong class
assert (cosmo != 2) and (2 != cosmo)
# correct
assert cosmo == cosmo
# different name <= not equal, but equivalent
newcosmo = cosmo.clone(name="test_equality")
assert (cosmo != newcosmo) and (newcosmo != cosmo)
assert cosmo.__equiv__(newcosmo) and newcosmo.__equiv__(cosmo)
# ---------------------------------------------------------------
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``.
This is a very general test and it is probably good to have a
hard-coded comparison.
"""
r = repr(cosmo)
# class in string rep
assert cosmo_cls.__qualname__ in r
assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing
r = r[len(cosmo_cls.__qualname__) + 1:] # remove
# name in string rep
if cosmo.name is not None:
assert f"name=\"{cosmo.name}\"" in r
assert r.index("name=") == 0
r = r[6 + len(cosmo.name) + 3:] # remove
# parameters in string rep
ps = {k: getattr(cosmo, k) for k in cosmo.__parameters__}
cps = {k: getattr(cosmo_cls, k) for k in cosmo.__parameters__}
for k, v in ps.items():
sv = f"{k}={v}"
assert sv in r
assert r.index(k) == 0
r = r[len(sv) + 2:] # remove
# ------------------------------------------------
@pytest.mark.parametrize("in_meta", [True, False])
@pytest.mark.parametrize("table_cls", [Table, QTable])
def test_astropy_table(self, cosmo, table_cls, in_meta):
"""Test ``astropy.table.Table(cosmology)``."""
tbl = table_cls(cosmo, cosmology_in_meta=in_meta)
assert isinstance(tbl, table_cls)
# the name & all parameters are columns
for n in ("name", *cosmo.__parameters__):
assert n in tbl.colnames
assert np.all(tbl[n] == getattr(cosmo, n))
# check if Cosmology is in metadata or a column
if in_meta:
assert tbl.meta["cosmology"] == cosmo.__class__.__qualname__
assert "cosmology" not in tbl.colnames
else:
assert "cosmology" not in tbl.meta
assert tbl["cosmology"][0] == cosmo.__class__.__qualname__
# the metadata is transferred
for k, v in cosmo.meta.items():
assert np.all(tbl.meta[k] == v)
# ===============================================================
# Usage Tests
def test_immutability(self, cosmo):
"""
Test immutability of cosmologies.
The metadata is mutable: see ``test_meta_mutable``.
"""
for n in cosmo.__all_parameters__:
with pytest.raises(AttributeError):
setattr(cosmo, n, getattr(cosmo, n))
def test_pickle_class(self, cosmo_cls, pickle_protocol):
"""Test classes can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo_cls, protocol=pickle_protocol)
unpickled = pickle.loads(f)
# test equality
assert unpickled == cosmo_cls
def test_pickle_instance(self, cosmo, pickle_protocol):
"""Test instances can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == cosmo
assert unpickled.meta == cosmo.meta
class CosmologySubclassTest(TestCosmology):
"""
Test subclasses of :class:`astropy.cosmology.Cosmology`.
This is broken away from ``TestCosmology``, because |Cosmology| is/will be
an ABC and subclasses must override some methods.
"""
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# instance-level
@abc.abstractmethod
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# -----------------------------------------------------------------------------
class FlatCosmologyMixinTest:
"""Tests for :class:`astropy.cosmology.core.FlatCosmologyMixin` subclasses.
The test suite structure mirrors the implementation of the tested code.
Just like :class:`astropy.cosmology.FlatCosmologyMixin` is an abstract
base class (ABC) that cannot be used by itself, so too is this corresponding
test class an ABC mixin.
E.g. to use this class::
class TestFlatSomeCosmology(FlatCosmologyMixinTest, TestSomeCosmology):
...
"""
def test_nonflat_class_(self, cosmo_cls, cosmo):
"""Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat_cls`.
"""
# Test it's a method on the class
assert issubclass(cosmo_cls, cosmo_cls._nonflat_cls_)
# It also works from the instance. # TODO! as a "metaclassmethod"
assert issubclass(cosmo_cls, cosmo._nonflat_cls_)
# Maybe not the most robust test, but so far all Flat classes have the
# name of their parent class.
assert cosmo._nonflat_cls_.__name__ in cosmo_cls.__name__
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
super().test_is_flat(cosmo_cls, cosmo)
# it's always True
assert cosmo.is_flat is True
def test_nonflat(self, cosmo):
"""Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat`.
"""
assert cosmo.nonflat.is_equivalent(cosmo)
assert cosmo.is_equivalent(cosmo.nonflat)
# ------------------------------------------------
# clone
def test_clone_to_nonflat_equivalent(self, cosmo):
"""Test method ``.clone()``to_nonflat argument."""
# just converting the class
nc = cosmo.clone(to_nonflat=True)
assert isinstance(nc, cosmo._nonflat_cls_)
assert nc == cosmo.nonflat
@abc.abstractmethod
def test_clone_to_nonflat_change_param(self, cosmo):
"""
Test method ``.clone()`` changing a(many) Parameter(s). No parameters
are changed here because FlatCosmologyMixin has no Parameters.
See class docstring for why this test method exists.
"""
# send to non-flat
nc = cosmo.clone(to_nonflat=True)
assert isinstance(nc, cosmo._nonflat_cls_)
assert nc == cosmo.nonflat
# ------------------------------------------------
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.is_equivalent`.
Normally this would pass up via super(), but ``__equiv__`` is meant
to be overridden, so we skip super().
e.g. FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestCosmology
vs FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestFLRW -> TestCosmology
"""
CosmologySubclassTest.test_is_equivalent(self, cosmo)
# ===============================================================
# Usage Tests
def test_subclassing(self, cosmo_cls):
"""Test when subclassing a flat cosmology."""
class SubClass1(cosmo_cls):
pass
# The classes have the same non-flat parent class
assert SubClass1._nonflat_cls_ is cosmo_cls._nonflat_cls_
# A more complex example is when Mixin classes are used.
class Mixin:
pass
class SubClass2(Mixin, cosmo_cls):
pass
# The classes have the same non-flat parent class
assert SubClass2._nonflat_cls_ is cosmo_cls._nonflat_cls_
# The order of the Mixin should not matter
class SubClass3(cosmo_cls, Mixin):
pass
# The classes have the same non-flat parent class
assert SubClass3._nonflat_cls_ is cosmo_cls._nonflat_cls_
def test_nonflat_cls_multiple_nonflat_inheritance():
"""
Test :meth:`astropy.cosmology.core.FlatCosmologyMixin._nonflat_cls_`
when there's more than one non-flat class in the inheritance.
"""
# Define a non-operable minimal subclass of Cosmology.
class SubCosmology2(Cosmology):
def __init__(self, H0, Tcmb0=0*u.K, m_nu=0*u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
@property
def is_flat(self):
return False
# Now make an ambiguous flat cosmology from the two SubCosmologies
with pytest.raises(TypeError, match="cannot create a consistent non-flat class"):
class FlatSubCosmology(FlatCosmologyMixin, SubCosmology, SubCosmology2):
@property
def nonflat(self):
pass
# -----------------------------------------------------------------------------
def test_flrw_moved_deprecation():
"""Test the deprecation warning about the move of FLRW classes."""
from astropy.cosmology import flrw
# it's deprecated to import `flrw/*` from `core.py`
with pytest.warns(AstropyDeprecationWarning):
from astropy.cosmology.core import FLRW
# but they are the same object
assert FLRW is flrw.FLRW
|
f114fb7f20b414a4ad9a8221bd3c0d43d9cea1c8dbdb48bc7769c6ea775739ac | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Stand-alone overall systems tests for :mod:`astropy.cosmology`."""
from io import StringIO
import numpy as np
import pytest
import astropy.constants as const
import astropy.units as u
from astropy.cosmology import flrw
from astropy.cosmology.realizations import Planck18
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.")
def test_flat_z1():
"""Test a flat cosmology at z=1 against several other on-line calculators.
Test values were taken from the following web cosmology calculators on
2012-02-11:
Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
(https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)
Kempner: http://www.kempner.net/cosmic.php
iCosmos: http://www.icosmos.co.uk/index.html
"""
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
# The order of values below is Wright, Kempner, iCosmos'
assert allclose(cosmo.comoving_distance(1),
[3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.angular_diameter_distance(1),
[1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.luminosity_distance(1),
[6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.lookback_time(1),
[7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3)
assert allclose(cosmo.lookback_distance(1),
[2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.")
def test_varyde_lumdist_mathematica():
"""Tests a few varying dark energy EOS models against a Mathematica computation."""
z = np.array([0.2, 0.4, 0.9, 1.2])
# w0wa models
cosmo = flrw.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),
[1.0, 0.9246310669529021, 0.9184087000251957])
cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4)
cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4)
# wpwa models
cosmo = flrw.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4)
cosmo = flrw.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4)
###############################################################################
# TODO! sort and refactor following tests.
# overall systems tests stay here, specific tests go to new test suite.
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_units():
""" Test if the right units are being returned"""
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)
assert cosmo.comoving_distance(1.0).unit == u.Mpc
assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.luminosity_distance(1.0).unit == u.Mpc
assert cosmo.lookback_time(1.0).unit == u.Gyr
assert cosmo.lookback_distance(1.0).unit == u.Mpc
assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
assert cosmo.Tcmb(1.0).unit == u.K
assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
assert cosmo.Tnu(1.0).unit == u.K
assert cosmo.Tnu([0.0, 1.0]).unit == u.K
assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3
assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3
assert cosmo.age(1.0).unit == u.Gyr
assert cosmo.distmod(1.0).unit == u.mag
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distance_broadcast():
""" Test array shape broadcasting for functions with single
redshift inputs"""
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27,
m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))
z = np.linspace(0.1, 1, 6)
z_reshape2d = z.reshape(2, 3)
z_reshape3d = z.reshape(3, 2, 1)
# Things with units
methods = ['comoving_distance', 'luminosity_distance',
'comoving_transverse_distance', 'angular_diameter_distance',
'distmod', 'lookback_time', 'age', 'comoving_volume',
'differential_comoving_volume', 'kpc_comoving_per_arcmin']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert value_flat.unit == value_2d.unit
assert value_flat.unit == value_3d.unit
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Also test unitless ones
methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H',
'w', 'de_density_scale', 'Onu', 'Ogamma',
'nu_relative_density']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Test some dark energy models
methods = ['Om', 'Ode', 'w', 'de_density_scale']
for tcosmo in [flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),
flrw.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),
flrw.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),
flrw.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5,
wp=-1.2, wa=-0.2, zp=0.9),
flrw.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]:
for method in methods:
g = getattr(tcosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
def test_equality():
"""Test equality and equivalence."""
# mismatched signatures, both directions.
newcosmo = flrw.w0waCDM(**Planck18._init_arguments, Ode0=0.6)
assert newcosmo != Planck18
assert Planck18 != newcosmo
def test_xtfuncs():
""" Test of absorption and lookback integrand"""
cosmo = flrw.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
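# lookback_time_integrand(z) is 1 / ((1 + z) * E(z)) and abs_distance_integrand(z)
# is (1 + z)**2 / E(z); the reference values below follow from E(z) for this cosmology.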
z = np.array([2.0, 3.2])
assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378,
rtol=1e-4)
assert allclose(cosmo.lookback_time_integrand(z),
[0.10333179, 0.04644541], rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402,
rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(z),
[2.7899584, 3.44104758], rtol=1e-4)
# This class is to test whether the routines work correctly
# if one only overloads w(z)
class test_cos_sub(flrw.FLRW):
def __init__(self):
super().__init__(70.0, 0.27, 0.73, Tcmb0=0.0, name="test_cos")
self._w0 = -0.9
def w(self, z):
return self._w0 * np.ones_like(z)
# Similar, but with neutrinos
class test_cos_subnu(flrw.FLRW):
def __init__(self):
super().__init__(70.0, 0.27, 0.73, Tcmb0=3.0, m_nu=0.1 * u.eV, name="test_cos_nu")
self._w0 = -0.8
def w(self, z):
return self._w0 * np.ones_like(z)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_de_subclass():
# This is the comparison object
z = [0.2, 0.4, 0.6, 0.9]
cosmo = flrw.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)
# Values taken from Ned Wright's advanced cosmology calculator, Aug 17 2012
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Now try the subclass that only gives w(z)
cosmo = test_cos_sub()
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Test efunc
assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
assert allclose(cosmo.efunc([0.5, 1.0]),
[1.31744953, 1.7489240754], rtol=1e-5)
assert allclose(cosmo.inv_efunc([0.5, 1.0]),
[0.75904236, 0.57178011], rtol=1e-5)
# Test de_density_scale
assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
assert allclose(cosmo.de_density_scale([0.5, 1.0]),
[1.12934694, 1.23114444], rtol=1e-4)
# Add neutrinos for efunc, inv_efunc
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_matter():
# Test non-relativistic matter evolution
tcos = flrw.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
assert allclose(tcos.Om0, 0.3)
assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
assert allclose(tcos.Om(0), 0.3)
assert allclose(tcos.Ob(0), 0.045)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455],
rtol=1e-4)
assert allclose(tcos.Ob(z),
[0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4)
assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636],
rtol=1e-4)
# Consistency of dark and baryonic matter evolution with all
# non-relativistic matter
assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_ocurv():
# Test Ok evolution
# Flat, boring case
tcos = flrw.FlatLambdaCDM(70.0, 0.3)
assert allclose(tcos.Ok0, 0.0)
assert allclose(tcos.Ok(0), 0.0)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],
rtol=1e-6)
# Not flat
tcos = flrw.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
assert allclose(tcos.Ok0, 0.2)
assert allclose(tcos.Ok(0), 0.2)
assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
rtol=1e-4)
# Test the sum; note that Ogamma/Onu are 0
assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
[1.0, 1.0, 1.0, 1.0], rtol=1e-5)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_ode():
# Test Ode evolution, turn off neutrinos, cmb
tcos = flrw.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
assert allclose(tcos.Ode0, 0.7)
assert allclose(tcos.Ode(0), 0.7)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],
rtol=1e-5)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_ogamma():
"""Tests the effects of changing the temperature of the CMB"""
# Tested against Ned Wright's advanced cosmology calculator,
# Sep 7 2012. The accuracy of our comparison is limited by
# how many digits it outputs, which limits our test to about
# 0.2% accuracy. The NWACC does not allow one
# to change the number of neutrino species, fixing that at 3.
# Also, inspection of the NWACC code shows it uses inaccurate
# constants at the 0.2% level (specifically, a_B),
# so we shouldn't expect to match it that well. The integral is
# also done rather crudely. Therefore, we should not expect
# the NWACC to be accurate to better than about 0.5%, which is
# unfortunate, but reflects a problem with it rather than this code.
# More accurate tests below using Mathematica
z = np.array([1.0, 10.0, 500.0, 1000.0])
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4)
# Next compare with doing the integral numerically in Mathematica,
# which allows more precision in the test. It is at least as
# good as 0.01%, possibly better
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5)
# Just to be really sure, we also do a version where the integral
# is analytic, which is a Ode = 0 flat universe. In this case
# Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
# Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
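# The next line computes Ogamma0 * h^2 = (4 sigma_SB / c^3) * Tcmb0^4 / rho_crit,100:
# 5.670373e-8 is the Stefan-Boltzmann constant in SI units, 299792458.0 is c in m/s,
# and 1.87837e-26 kg/m^3 is the critical density for H0 = 100 km/s/Mpc.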
Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26
Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2
Om0 = 1.0 - Or0
hubdis = (299792.458 / 70.0) * u.Mpc
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
# And integers for z
assert allclose(cosmo.comoving_distance(z.astype(int)),
targvals, rtol=1e-5)
# Try Tcmb0 = 4
Or0 *= (4.0 / 2.725) ** 4
Om0 = 1.0 - Or0
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_tcmb():
cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)
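# The CMB temperature scales as Tcmb(z) = Tcmb0 * (1 + z).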
assert allclose(cosmo.Tcmb0, 2.5 * u.K)
assert allclose(cosmo.Tcmb(2), 7.5 * u.K)
z = [0.0, 1.0, 2.0, 3.0, 9.0]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
# Make sure it's the same for integers
z = [0, 1, 2, 3, 9]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_tnu():
cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
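# The neutrino temperature is Tnu0 = (4/11)**(1/3) * Tcmb0 ~= 0.7138 * 3 K,
# and it scales with redshift the same way as Tcmb.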
assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)
assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)
z = [0.0, 1.0, 2.0, 3.0]
expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
# Test for integers
z = [0, 1, 2, 3]
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_efunc_vs_invefunc_flrw():
""" Test that efunc and inv_efunc give inverse values"""
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# FLRW is abstract, so requires test_cos_sub defined earlier
# This requires scipy, unlike the built-ins, because it
# calls de_density_scale, which has an integral in it
cosmo = test_cos_sub()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# Add neutrinos
cosmo = test_cos_subnu()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_kpc_methods():
cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(cosmo.arcsec_per_kpc_comoving(3),
0.0317179167 * u.arcsec / u.kpc)
assert allclose(cosmo.arcsec_per_kpc_proper(3),
0.1268716668 * u.arcsec / u.kpc)
assert allclose(cosmo.kpc_comoving_per_arcmin(3),
1891.6753126 * u.kpc / u.arcmin)
assert allclose(cosmo.kpc_proper_per_arcmin(3),
472.918828 * u.kpc / u.arcmin)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_comoving_volume():
c_flat = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test against Ned Wright's calculator (cubic Gpc)
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
# The Wright calculator isn't very accurate, so we use a rather
# modest precision
assert allclose(c_flat.comoving_volume(redshifts), wright_flat,
rtol=1e-2)
assert allclose(c_open.comoving_volume(redshifts),
wright_open, rtol=1e-2)
assert allclose(c_closed.comoving_volume(redshifts),
wright_closed, rtol=1e-2)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_differential_comoving_volume():
from scipy.integrate import quad
c_flat = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test that integration of differential_comoving_volume()
# yields same as comoving_volume()
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
# The Wright calculator isn't very accurate, so we use a rather
# modest precision.
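# differential_comoving_volume gives dV_c / (dz dOmega), so integrating over z
# and multiplying by the full solid angle 4*pi should recover comoving_volume.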
def ftemp(x): return c_flat.differential_comoving_volume(x).value
def otemp(x): return c_open.differential_comoving_volume(x).value
def ctemp(x): return c_closed.differential_comoving_volume(x).value
# Multiply by solid_angle (4 * pi)
assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_flat, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_open, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_closed, rtol=1e-2)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_flat_open_closed_icosmo():
""" Test against the tabulated values generated from icosmo.org
with three example cosmologies (flat, open and closed).
"""
cosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z comoving_transverse_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 669.77536 576.15085 778.61386
0.32500000 1285.5964 970.26143 1703.4152
0.50000000 1888.6254 1259.0836 2832.9381
0.66250000 2395.5489 1440.9317 3982.6000
0.82500000 2855.5732 1564.6976 5211.4210
1.0000000 3303.8288 1651.9144 6607.6577
1.1625000 3681.1867 1702.2829 7960.5663
1.3250000 4025.5229 1731.4077 9359.3408
1.5000000 4363.8558 1745.5423 10909.640
1.6625000 4651.4830 1747.0359 12384.573
1.8250000 4916.5970 1740.3883 13889.387
2.0000000 5179.8621 1726.6207 15539.586
2.1625000 5406.0204 1709.4136 17096.540
2.3250000 5616.5075 1689.1752 18674.888
2.5000000 5827.5418 1665.0120 20396.396
2.6625000 6010.4886 1641.0890 22013.414
2.8250000 6182.1688 1616.2533 23646.796
3.0000000 6355.6855 1588.9214 25422.742
3.1625000 6507.2491 1563.3031 27086.425
3.3250000 6650.4520 1537.6768 28763.205
3.5000000 6796.1499 1510.2555 30582.674
3.6625000 6924.2096 1485.0852 32284.127
3.8250000 7045.8876 1460.2876 33996.408
4.0000000 7170.3664 1434.0733 35851.832
4.1625000 7280.3423 1410.2358 37584.767
4.3250000 7385.3277 1386.9160 39326.870
4.5000000 7493.2222 1362.4040 41212.722
4.6625000 7588.9589 1340.2135 42972.480
"""
cosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z comoving_transverse_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 643.08185 553.18868 747.58265
0.32500000 1200.9858 906.40441 1591.3062
0.50000000 1731.6262 1154.4175 2597.4393
0.66250000 2174.3252 1307.8648 3614.8157
0.82500000 2578.7616 1413.0201 4706.2399
1.0000000 2979.3460 1489.6730 5958.6920
1.1625000 3324.2002 1537.2024 7188.5829
1.3250000 3646.8432 1568.5347 8478.9104
1.5000000 3972.8407 1589.1363 9932.1017
1.6625000 4258.1131 1599.2913 11337.226
1.8250000 4528.5346 1603.0211 12793.110
2.0000000 4804.9314 1601.6438 14414.794
2.1625000 5049.2007 1596.5852 15968.097
2.3250000 5282.6693 1588.7727 17564.875
2.5000000 5523.0914 1578.0261 19330.820
2.6625000 5736.9813 1566.4113 21011.694
2.8250000 5942.5803 1553.6158 22730.370
3.0000000 6155.4289 1538.8572 24621.716
3.1625000 6345.6997 1524.4924 26413.975
3.3250000 6529.3655 1509.6799 28239.506
3.5000000 6720.2676 1493.3928 30241.204
3.6625000 6891.5474 1478.0799 32131.840
3.8250000 7057.4213 1462.6780 34052.058
4.0000000 7230.3723 1446.0745 36151.862
4.1625000 7385.9998 1430.7021 38130.224
4.3250000 7537.1112 1415.4199 40135.117
4.5000000 7695.0718 1399.1040 42322.895
4.6625000 7837.5510 1384.1150 44380.133
"""
cosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z comoving_transverse_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 601.80160 517.67879 699.59436
0.32500000 1057.9502 798.45297 1401.7840
0.50000000 1438.2161 958.81076 2157.3242
0.66250000 1718.6778 1033.7912 2857.3019
0.82500000 1948.2400 1067.5288 3555.5381
1.0000000 2152.7954 1076.3977 4305.5908
1.1625000 2312.3427 1069.2914 5000.4410
1.3250000 2448.9755 1053.3228 5693.8681
1.5000000 2575.6795 1030.2718 6439.1988
1.6625000 2677.9671 1005.8092 7130.0873
1.8250000 2768.1157 979.86398 7819.9270
2.0000000 2853.9222 951.30739 8561.7665
2.1625000 2924.8116 924.84161 9249.7167
2.3250000 2988.5333 898.80701 9936.8732
2.5000000 3050.3065 871.51614 10676.073
2.6625000 3102.1909 847.01459 11361.774
2.8250000 3149.5043 823.39982 12046.854
3.0000000 3195.9966 798.99915 12783.986
3.1625000 3235.5334 777.30533 13467.908
3.3250000 3271.9832 756.52790 14151.327
3.5000000 3308.1758 735.15017 14886.791
3.6625000 3339.2521 716.19347 15569.263
3.8250000 3368.1489 698.06195 16251.319
4.0000000 3397.0803 679.41605 16985.401
4.1625000 3422.1142 662.87926 17666.664
4.3250000 3445.5542 647.05243 18347.576
4.5000000 3469.1805 630.76008 19080.493
4.6625000 3489.7534 616.29199 19760.729
"""
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
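# The sketch below is an editorial addition (not from icosmo): a minimal check
# of the relations tying the three tabulated columns together, assuming only
# the standard definitions d_L = (1 + z) d_M and d_A = d_M / (1 + z).
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_icosmo_distance_relations_sketch():
    cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)
    z = np.array([0.5, 1.0, 2.0, 4.0])
    dm = cosmo.comoving_transverse_distance(z)
    # Luminosity and angular diameter distances follow from d_M directly.
    assert allclose(cosmo.luminosity_distance(z), (1.0 + z) * dm)
    assert allclose(cosmo.angular_diameter_distance(z), dm / (1.0 + z))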
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_integral():
# Test integer vs. floating point inputs
cosmo = flrw.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)
assert allclose(cosmo.comoving_distance(3),
cosmo.comoving_distance(3.0), rtol=1e-7)
assert allclose(cosmo.comoving_distance([1, 2, 3, 5]),
cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),
rtol=1e-7)
assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
assert allclose(cosmo.efunc([1, 2, 6]),
cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7)
assert allclose(cosmo.inv_efunc([1, 2, 6]),
cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_de_densityscale():
cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
assert allclose(cosmo.de_density_scale(z),
[1.0, 1.0, 1.0, 1.0, 1.0])
# Integer check
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = flrw.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)
assert allclose(cosmo.de_density_scale(z),
[1.15369, 1.31453, 1.83712, 3.95285, 6.5479],
rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = flrw.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
assert allclose(cosmo.de_density_scale(z),
[0.746048, 0.5635595, 0.25712378, 0.026664129,
0.0035916468], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
assert allclose(cosmo.de_density_scale(z),
[0.9934201, 0.9767912, 0.897450,
0.622236, 0.4458753], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = flrw.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
assert allclose(cosmo.de_density_scale(z),
[1.012246048, 1.0280102, 1.087439,
1.324988, 1.565746], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_age():
    # WMAP7 but with Omega_relativistic = 0
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
assert allclose(tcos.age([1., 5.]),
[5.97113193, 1.20553129] * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
# Add relativistic species
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)
# And massive neutrinos
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0,
m_nu=0.1 * u.eV)
assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distmod():
    # WMAP7 but with Omega_relativistic = 0
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
assert allclose(tcos.distmod([1, 5]),
[44.124857, 48.40167258] * u.mag)
assert allclose(tcos.distmod([1., 5.]),
[44.124857, 48.40167258] * u.mag)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_neg_distmod():
# Cosmology with negative luminosity distances (perfectly okay,
# if obscure)
tcos = flrw.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
assert allclose(tcos.luminosity_distance([50, 100]),
[16612.44047622, -46890.79092244] * u.Mpc)
assert allclose(tcos.distmod([50, 100]),
[46.102167189, 48.355437790944] * u.mag)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_critical_density():
from astropy.constants import codata2014
# WMAP7 but with Omega_relativistic = 0
# These tests will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py.
# critical_density0 is inversely proportional to G.
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
fac = (const.G / codata2014.G).to(u.dimensionless_unscaled).value
assert allclose(tcos.critical_density0 * fac,
9.309668456020899e-30 * (u.g / u.cm**3))
assert allclose(tcos.critical_density0,
tcos.critical_density(0))
assert allclose(
tcos.critical_density([1, 5]) * fac,
[2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3))
assert allclose(
tcos.critical_density([1., 5.]) * fac,
[2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3))
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_comoving_distance_z1z2():
tcos = flrw.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))
# Comoving distances are invertible
assert allclose(tcos._comoving_distance_z1z2(1, 2),
-tcos._comoving_distance_z1z2(2, 1))
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (3767.90579253,
2386.25591391,
-1381.64987862,
2893.11776663,
174.1524683) * u.Mpc
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
results)
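# Editorial sketch (not part of the upstream suite): line-of-sight comoving
# distances are additive, so d_C(z1, z2) should equal d_C(0, z2) - d_C(0, z1)
# (e.g. the third entry of ``results`` above is the second minus the first).
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_comoving_distance_z1z2_additivity_sketch():
    tcos = flrw.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
    z1, z2 = 0.5, 2.0
    assert allclose(tcos._comoving_distance_z1z2(z1, z2),
                    tcos.comoving_distance(z2) - tcos.comoving_distance(z1))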
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_age_in_special_cosmologies():
"""Check that age in de Sitter and Einstein-de Sitter Universes work.
Some analytic solutions fail at these critical points.
"""
c_dS = flrw.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.age(z=0), np.inf * u.Gyr)
assert allclose(c_dS.age(z=1), np.inf * u.Gyr)
assert allclose(c_dS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr)
c_EdS = flrw.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr)
assert allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distance_in_special_cosmologies():
"""Check that de Sitter and Einstein-de Sitter Universes both work.
Some analytic solutions fail at these critical points.
"""
c_dS = flrw.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = flrw.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
c_dS = flrw.LambdaCDM(100, 0, 1, Tcmb0=0)
assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = flrw.LambdaCDM(100, 1, 0, Tcmb0=0)
assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_comoving_transverse_distance_z1z2():
tcos = flrw.FlatLambdaCDM(100, 0.3, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))
# Tests that should actually work, target values computed with
# http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
# Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2),
1313.2232194828466 * u.Mpc)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test Flat Universe with Omega_M > 1. Rarely used, but perfectly valid.
tcos = flrw.FlatLambdaCDM(100, 1.5, Tcmb0=0.0)
results = (2202.72682564,
1559.51679971,
-643.21002593,
1408.36365679,
85.09286258) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test non-flat cases to avoid simply testing
# comoving_distance_z1z2. Test array, array case.
tcos = flrw.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0)
results = (3535.931375645655,
2226.430046551708,
-1208.6817970036532,
2595.567367601969,
151.36592003406884) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
# Test positive curvature with scalar, array combination.
tcos = flrw.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0)
z1 = 0.1
z2 = 0, 0.1, 0.2, 0.5, 1.1, 2
results = (-281.31602666724865,
0.,
248.58093707820436,
843.9331377460543,
1618.6104987686672,
2287.5626543279927) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_angular_diameter_distance_z1z2():
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5])
# Tests that should actually work
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
646.22968662822018 * u.Mpc)
z1 = 2 # Separate test for z2<z1, returns negative value with warning
z2 = 1
results = -969.34452994 * u.Mpc
with pytest.warns(AstropyUserWarning, match='less than first redshift'):
assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results)
z1 = 0, 0, 0.5, 1
z2 = 2, 1, 2.5, 1.1
results = (1760.0628637762106,
1670.7497657219858,
1159.0970895962193,
115.72768186186921) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2),
results)
z1 = 0.1
z2 = 0.1, 0.2, 0.5, 1.1, 2
results = (0.,
332.09893173,
986.35635069,
1508.37010062,
1621.07937976) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2),
results)
# Non-flat (positive Ok0) test
tcos = flrw.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
620.1175337852428 * u.Mpc)
# Non-flat (negative Ok0) test
tcos = flrw.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
228.42914659246014 * u.Mpc)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_absorption_distance():
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.absorption_distance([1, 3]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance([1., 3.]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance(3), 7.98685853)
assert allclose(tcos.absorption_distance(3.), 7.98685853)
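# Editorial sketch (not part of the upstream suite): the dimensionless
# absorption distance is X(z) = int_0^z (1 + z')^2 / E(z') dz', so a direct
# quadrature of that integrand should reproduce the values checked above.
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_absorption_distance_quadrature_sketch():
    from scipy.integrate import quad
    tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    def integrand(zp):
        # (1 + z)^2 / E(z), written via inv_efunc to reuse the cosmology.
        return (1.0 + zp) ** 2 * tcos.inv_efunc(zp)
    assert allclose(tcos.absorption_distance(3), quad(integrand, 0, 3)[0],
                    rtol=1e-6)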
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_distances():
# Test distance calculations for various special case
# scenarios (no relativistic species, normal, massive neutrinos)
# These do not come from external codes -- they are just internal
# checks to make sure nothing changes if we muck with the distance
# calculators
z = np.array([1.0, 2.0, 3.0, 4.0])
# The pattern here is: no relativistic species, the relativistic
# species with massless neutrinos, then massive neutrinos
cos = flrw.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2953.93001902, 4616.7134253, 5685.07765971,
6440.80611897] * u.Mpc, rtol=1e-4)
cos = flrw.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3037.12620424, 4776.86236327, 5889.55164479,
6671.85418235] * u.Mpc, rtol=1e-4)
cos = flrw.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2471.80626824, 3567.1902565, 4207.15995626,
4638.20476018] * u.Mpc, rtol=1e-4)
# Flat
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3180.83488552, 5060.82054204, 6253.6721173,
7083.5374303] * u.Mpc, rtol=1e-4)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3180.42662867, 5059.60529655, 6251.62766102,
7080.71698117] * u.Mpc, rtol=1e-4)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.54183142, 3371.91131264, 3988.40711188,
4409.09346922] * u.Mpc, rtol=1e-4)
# Add w
cos = flrw.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3216.8296894, 5117.2097601, 6317.05995437,
7149.68648536] * u.Mpc, rtol=1e-4)
cos = flrw.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3143.56537758, 5000.32196494, 6184.11444601,
7009.80166062] * u.Mpc, rtol=1e-4)
cos = flrw.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.76035371, 3372.1971387, 3988.71362289,
4409.40817174] * u.Mpc, rtol=1e-4)
# Non-flat w
cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2849.6163356, 4428.71661565, 5450.97862778,
6179.37072324] * u.Mpc, rtol=1e-4)
cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2904.35580229, 4511.11471267, 5543.43643353,
6275.9206788] * u.Mpc, rtol=1e-4)
cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2473.32522734, 3581.54519631, 4232.41674426,
4671.83818117] * u.Mpc, rtol=1e-4)
# w0wa
cos = flrw.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2937.7807638, 4572.59950903, 5611.52821924,
6339.8549956] * u.Mpc, rtol=1e-4)
cos = flrw.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2907.34722624, 4539.01723198, 5593.51611281,
6342.3228444] * u.Mpc, rtol=1e-4)
cos = flrw.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2507.18336722, 3633.33231695, 4292.44746919,
4736.35404638] * u.Mpc, rtol=1e-4)
# Flatw0wa
cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3123.29892781, 4956.15204302, 6128.15563818,
6948.26480378] * u.Mpc, rtol=1e-4)
cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3122.92671907, 4955.03768936, 6126.25719576,
6945.61856513] * u.Mpc, rtol=1e-4)
cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.70072701, 3372.13719963, 3988.6571093,
4409.35399673] * u.Mpc, rtol=1e-4)
# wpwa
cos = flrw.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2954.68975298, 4599.83254834, 5643.04013201,
6373.36147627] * u.Mpc, rtol=1e-4)
cos = flrw.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2919.00656215, 4558.0218123, 5615.73412391,
6366.10224229] * u.Mpc, rtol=1e-4)
cos = flrw.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2629.48489827, 3874.13392319, 4614.31562397,
5116.51184842] * u.Mpc, rtol=1e-4)
# w0wz
cos = flrw.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3051.68786716, 4756.17714818, 5822.38084257,
6562.70873734] * u.Mpc, rtol=1e-4)
cos = flrw.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2997.8115653, 4686.45599916, 5764.54388557,
6524.17408738] * u.Mpc, rtol=1e-4)
cos = flrw.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2676.73467639, 3940.57967585, 4686.90810278,
5191.54178243] * u.Mpc, rtol=1e-4)
# Also test different numbers of massive neutrinos
# for FlatLambdaCDM to give the scalar nu density functions a
    # workout
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 0, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2777.71589173, 4186.91111666, 5046.0300719,
5636.10397302] * u.Mpc, rtol=1e-4)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 5, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2636.48149391, 3913.14102091, 4684.59108974,
5213.07557084] * u.Mpc, rtol=1e-4)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2563.5093049, 3776.63362071, 4506.83448243,
5006.50158829] * u.Mpc, rtol=1e-4)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2,
m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2525.58017482, 3706.87633298, 4416.58398847,
4901.96669755] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_massivenu_density():
# Testing neutrino density calculation
# Simple test cosmology, where we compare rho_nu and rho_gamma
# against the exact formula (eq 24/25 of Komatsu et al. 2011)
# computed using Mathematica. The approximation we use for f(y)
# is only good to ~ 0.5% (with some redshift dependence), so that's
# what we test to.
ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
# First try 3 massive neutrinos, all 100 eV -- note this is a universe
# seriously dominated by neutrinos!
tcos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(100.0, u.eV))
assert tcos.has_massive_nu
assert tcos.Neff == 3
nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323,
15633.5, 171.801])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
# Next, slightly less massive
tcos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.25, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312,
39.1005, 1.11086])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
# For this one also test Onu directly
onu_exp = np.array([0.01890217, 0.05244681, 0.0638236,
0.06999286, 0.1344951])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# And fairly light
tcos = flrw.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.01, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348,
1.90671, 1.00021])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00066599, 0.00172677, 0.0020732,
0.00268404, 0.0978313])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],
rtol=1e-4)
assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],
rtol=1e-4)
# Now a mixture of neutrino masses, with non-integer Neff
tcos = flrw.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04,
m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV))
nurel_exp = (nuprefac * tcos.Neff
* np.array([149.386233, 74.87915, 50.0518, 14.002403, 1.03702333]))
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00584959, 0.01493142, 0.01772291,
0.01963451, 0.10227728])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# Integer redshifts
ztest = ztest.astype(int)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
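# Editorial sketch (not part of the upstream suite): in the massless limit
# f(y=0) = 1, so nu_relative_density reduces to the constant prefactor
# 7/8 (4/11)^(4/3) * Neff at every redshift, which ties the ``nuprefac``
# factor used above to the simple massless formula.
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_massless_nu_relative_density_sketch():
    nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
    tcos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
                              m_nu=u.Quantity(0.0, u.eV))
    assert allclose(tcos.nu_relative_density([0.0, 1.0, 10.0]),
                    nuprefac * tcos.Neff * np.ones(3), rtol=1e-6)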
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_elliptic_comoving_distance_z1z2():
"""Regression test for #8388."""
cosmo = flrw.LambdaCDM(70., 2.3, 0.05, Tcmb0=0)
z = 0.2
assert allclose(cosmo.comoving_distance(z),
cosmo._integral_comoving_distance_z1z2(0., z))
assert allclose(cosmo._elliptic_comoving_distance_z1z2(0., z),
cosmo._integral_comoving_distance_z1z2(0., z))
SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES = [
flrw.FlatLambdaCDM(H0=70, Om0=0.0, Tcmb0=0.0), # de Sitter
flrw.FlatLambdaCDM(H0=70, Om0=1.0, Tcmb0=0.0), # Einstein - de Sitter
flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0.0), # Hypergeometric
flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.6, Tcmb0=0.0), # Elliptic
]
ITERABLE_REDSHIFTS = [
(0, 1, 2, 3, 4), # tuple
[0, 1, 2, 3, 4], # list
np.array([0, 1, 2, 3, 4]), # array
]
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize('cosmo', SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES)
@pytest.mark.parametrize('z', ITERABLE_REDSHIFTS)
def test_comoving_distance_iterable_argument(cosmo, z):
"""
Regression test for #10980
Test that specialized comoving distance methods handle iterable arguments.
"""
assert allclose(cosmo.comoving_distance(z),
cosmo._integral_comoving_distance_z1z2(0., z))
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize('cosmo', SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES)
def test_comoving_distance_broadcast(cosmo):
"""
Regression test for #10980
Test that specialized comoving distance methods broadcast array arguments.
"""
z1 = np.zeros((2, 5))
z2 = np.ones((3, 1, 5))
z3 = np.ones((7, 5))
output_shape = np.broadcast(z1, z2).shape
# Check compatible array arguments return an array with the correct shape
assert cosmo._comoving_distance_z1z2(z1, z2).shape == output_shape
# Check incompatible array arguments raise an error
with pytest.raises(ValueError, match='z1 and z2 have different shapes'):
cosmo._comoving_distance_z1z2(z1, z3)
|
eae35d636e0514ecbe6476ff1b24f2f1f0d79ca8421d70d84d8a6f9716894903 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import pickle
# THIRD PARTY
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy import cosmology
from astropy.cosmology import parameters, realizations
from astropy.cosmology.realizations import Planck13, default_cosmology
def test_realizations_in_toplevel_dir():
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology`."""
d = dir(cosmology)
assert set(d) == set(cosmology.__all__)
for n in parameters.available:
assert n in d
def test_realizations_in_realizations_dir():
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology.realizations`."""
d = dir(realizations)
assert set(d) == set(realizations.__all__)
for n in parameters.available:
assert n in d
class Test_default_cosmology(object):
"""Tests for :class:`~astropy.cosmology.realizations.default_cosmology`."""
# -----------------------------------------------------
# Get
def test_get_current(self):
"""Test :meth:`astropy.cosmology.default_cosmology.get` current value."""
cosmo = default_cosmology.get()
assert cosmo is default_cosmology.validate(default_cosmology._value)
# -----------------------------------------------------
# get_cosmology_from_string (deprecated)
def test_get_cosmology_from_string(self, recwarn):
"""Test method ``get_cosmology_from_string``."""
cosmo = default_cosmology.get_cosmology_from_string("no_default")
assert cosmo is None
cosmo = default_cosmology.get_cosmology_from_string("Planck13")
assert cosmo is Planck13
with pytest.raises(ValueError):
cosmo = default_cosmology.get_cosmology_from_string("fail!")
# -----------------------------------------------------
# Validate
def test_validate_fail(self):
"""Test :meth:`astropy.cosmology.default_cosmology.validate`."""
# bad input type
with pytest.raises(TypeError, match="must be a string or Cosmology"):
default_cosmology.validate(TypeError)
# a not-valid option, but still a str
with pytest.raises(ValueError, match="Unknown cosmology"):
default_cosmology.validate("fail!")
# a not-valid type
with pytest.raises(TypeError, match="cannot find a Cosmology"):
default_cosmology.validate("available")
def test_validate_default(self):
"""Test method ``validate`` for specific values."""
value = default_cosmology.validate(None)
assert value is realizations.Planck18
@pytest.mark.parametrize("name", parameters.available)
def test_validate_str(self, name):
"""Test method ``validate`` for string input."""
value = default_cosmology.validate(name)
assert value is getattr(realizations, name)
@pytest.mark.parametrize("name", parameters.available)
def test_validate_cosmo(self, name):
"""Test method ``validate`` for cosmology instance input."""
cosmo = getattr(realizations, name)
value = default_cosmology.validate(cosmo)
assert value is cosmo
def test_validate_no_default(self):
"""Test :meth:`astropy.cosmology.default_cosmology.get` to `None`."""
cosmo = default_cosmology.validate("no_default")
assert cosmo is None
@pytest.mark.parametrize("name", parameters.available)
def test_pickle_builtin_realizations(name, pickle_protocol):
"""
    Test that built-in realizations can be pickled and unpickled.
Also a regression test for #12008.
"""
# get class instance
original = getattr(cosmology, name)
# pickle and unpickle
f = pickle.dumps(original, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == original
assert unpickled.meta == original.meta
# if the units are not enabled, it isn't equal because redshift units
# are not equal. This is a weird, known issue.
unpickled = pickle.loads(f)
assert unpickled == original
assert unpickled.meta != original.meta
|
5749259abf7b2ef7ec32731f25db05bb609a370854dace1dc4c893489590c01b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import sys
from io import StringIO
import numpy as np
import pytest
from astropy import units as u
from astropy.cosmology import core, flrw
from astropy.cosmology.funcs import _z_at_scalar_value, z_at_value
from astropy.cosmology.realizations import (
WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15, Planck18)
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.exceptions import AstropyUserWarning
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_scalar():
# These are tests of expected values, and hence have less precision
# than the roundtrip tests below (test_z_at_value_roundtrip);
# here we have to worry about the cosmological calculations
# giving slightly different values on different architectures,
    # whereas there we are checking internal consistency on the same
    # architecture and so can be more demanding.
cosmo = Planck13
assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.19812268, rtol=1e-6)
assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr), 0.795198375, rtol=1e-6)
assert allclose(z_at_value(cosmo.distmod, 46 * u.mag), 1.991389168, rtol=1e-6)
assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc), 1.36857907, rtol=1e-6)
assert allclose(z_at_value(cosmo.luminosity_distance, 26.037193804 * u.Gpc, ztol=1e-10),
3, rtol=1e-9)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=2),
0.681277696, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=2.5),
3.7914908, rtol=1e-6)
# test behavior when the solution is outside z limits (should
# raise a CosmologyError)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.)
@pytest.mark.skipif('not HAS_SCIPY')
class Test_ZatValue:
def setup_class(self):
self.cosmo = Planck13
def test_broadcast_arguments(self):
"""Test broadcast of arguments."""
# broadcasting main argument
assert allclose(
z_at_value(self.cosmo.age, [2, 7] * u.Gyr),
[3.1981206134773115, 0.7562044333305182], rtol=1e-6)
# basic broadcast of secondary arguments
assert allclose(
z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=[0, 2.5], zmax=[2, 4]),
[0.681277696, 3.7914908], rtol=1e-6)
# more interesting broadcast
assert allclose(
z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=[[0, 2.5]], zmax=[2, 4]),
[[0.681277696, 3.7914908]], rtol=1e-6)
def test_broadcast_bracket(self):
"""`bracket` has special requirements."""
# start with an easy one
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=None),
3.1981206134773115, rtol=1e-6)
# now actually have a bracket
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4]),
3.1981206134773115, rtol=1e-6)
# now a bad length
with pytest.raises(ValueError, match="sequence"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4, 4, 5])
# now the wrong dtype : an ndarray, but not an object array
with pytest.raises(TypeError, match="dtype"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=np.array([0, 4]))
# now an object array of brackets
bracket = np.array([[0, 4], [0, 3, 4]], dtype=object)
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=bracket),
[3.1981206134773115, 3.1981206134773115], rtol=1e-6)
def test_bad_broadcast(self):
"""Shapes mismatch as expected"""
with pytest.raises(ValueError, match="broadcast"):
z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=[0, 2.5, 0.1], zmax=[2, 4])
def test_scalar_input_to_output(self):
"""Test scalar input returns a scalar."""
z = z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=0, zmax=2)
assert isinstance(z, u.Quantity)
assert z.dtype == np.float64
assert z.shape == ()
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_numpyvectorize():
"""Test that numpy vectorize fails on Quantities.
If this test starts failing then numpy vectorize can be used instead of
the home-brewed vectorization. Please submit a PR making the change.
"""
z_at_value = np.vectorize(_z_at_scalar_value,
excluded=["func", "method", "verbose"])
with pytest.raises(u.UnitConversionError, match="dimensionless quantities"):
z_at_value(Planck15.age, 10*u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_verbose(monkeypatch):
cosmo = Planck13
# Test the "verbose" flag. Since this uses "print", need to mod stdout
mock_stdout = StringIO()
monkeypatch.setattr(sys, 'stdout', mock_stdout)
resx = z_at_value(cosmo.age, 2 * u.Gyr, verbose=True)
assert str(resx.value) in mock_stdout.getvalue() # test "verbose" prints res
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('method', ['Brent', 'Golden', 'Bounded'])
def test_z_at_value_bracketed(method):
"""
Test 2 solutions for angular diameter distance by not constraining zmin, zmax,
but setting `bracket` on the appropriate side of the turning point z.
Setting zmin / zmax should override `bracket`.
"""
cosmo = Planck13
if method == 'Bounded':
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
z = z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method)
if z > 1.6:
z = 3.7914908
bracket = (0.9, 1.5)
else:
z = 0.6812777
bracket = (1.6, 2.0)
with pytest.warns(UserWarning, match=r"Option 'bracket' is ignored"):
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=bracket), z, rtol=1e-6)
else:
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(0.3, 1.0)), 0.6812777, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(2.0, 4.0)), 3.7914908, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(0.1, 1.5)), 0.6812777, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(0.1, 1.0, 2.0)), 0.6812777, rtol=1e-6)
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(0.9, 1.5)), 0.6812777, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(1.6, 2.0)), 3.7914908, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(1.6, 2.0), zmax=1.6), 0.6812777, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(0.9, 1.5), zmin=1.5), 3.7914908, rtol=1e-6)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(3.9, 5.0), zmin=4.)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('method', ['Brent', 'Golden', 'Bounded'])
def test_z_at_value_unconverged(method):
"""
    Test warnings on a non-converged solution when `maxfun` is set to too small an
    iteration number; only 'Bounded' returns a status value and a specific message.
"""
cosmo = Planck18
ztol = {'Brent': [1e-4, 1e-4], 'Golden': [1e-3, 1e-2], 'Bounded': [1e-3, 1e-1]}
if method == 'Bounded':
ctx = pytest.warns(AstropyUserWarning, match='Solver returned 1: Maximum number of '
'function calls reached')
else:
ctx = pytest.warns(AstropyUserWarning, match='Solver returned None')
with ctx:
z0 = z_at_value(cosmo.angular_diameter_distance, 1*u.Gpc, zmax=2, maxfun=13, method=method)
with ctx:
z1 = z_at_value(cosmo.angular_diameter_distance, 1*u.Gpc, zmin=2, maxfun=13, method=method)
assert allclose(z0, 0.32442, rtol=ztol[method][0])
assert allclose(z1, 8.18551, rtol=ztol[method][1])
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('cosmo', [Planck13, Planck15, Planck18, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9,
flrw.LambdaCDM, flrw.FlatLambdaCDM, flrw.wpwaCDM, flrw.w0wzCDM,
flrw.wCDM, flrw.FlatwCDM, flrw.w0waCDM, flrw.Flatw0waCDM])
def test_z_at_value_roundtrip(cosmo):
"""
Calculate values from a known redshift, and then check that
z_at_value returns the right answer.
"""
z = 0.5
# Skip Ok, w, de_density_scale because in the Planck cosmologies
    # they are redshift independent and hence uninvertible;
    # *_distance_z1z2 methods take multiple arguments, so they require
    # special handling;
    # clone is not a redshift-dependent method;
# nu_relative_density is not redshift-dependent in the WMAP cosmologies
skip = ('Ok', 'Otot',
'angular_diameter_distance_z1z2',
'clone', 'is_equivalent',
'de_density_scale', 'w')
if str(cosmo.name).startswith('WMAP'):
skip += ('nu_relative_density', )
methods = inspect.getmembers(cosmo, predicate=inspect.ismethod)
for name, func in methods:
if name.startswith('_') or name in skip:
continue
fval = func(z)
        # we need a bracket here to pick the right solution for
        # angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
got = z_at_value(func, fval, bracket=[0.3, 1.0], ztol=1e-12)
assert allclose(got, z, rtol=2e-11), f'Round-trip testing {name} failed'
# Test distance functions between two redshifts; only for realizations
if isinstance(cosmo.name, str):
z2 = 2.0
func_z1z2 = [
lambda z1: cosmo._comoving_distance_z1z2(z1, z2),
lambda z1: cosmo._comoving_transverse_distance_z1z2(z1, z2),
lambda z1: cosmo.angular_diameter_distance_z1z2(z1, z2)
]
for func in func_z1z2:
fval = func(z)
assert allclose(z, z_at_value(func, fval, zmax=1.5, ztol=1e-12), rtol=2e-11)
|
37c13cc86415a41bc03b9e1b1405bdcba258996ae650226dc907ac0fe878320c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Astropy FLRW classes."""
from . import base, lambdacdm, w0cdm, w0wacdm, w0wzcdm, wpwazpcdm
from .base import * # noqa: F401, F403
from .lambdacdm import * # noqa: F401, F403
from .w0cdm import * # noqa: F401, F403
from .w0wacdm import * # noqa: F401, F403
from .w0wzcdm import * # noqa: F401, F403
from .wpwazpcdm import * # noqa: F401, F403
__all__ = (base.__all__ + lambdacdm.__all__ + w0cdm.__all__
+ w0wacdm.__all__ + wpwazpcdm.__all__ + w0wzcdm.__all__)
def __getattr__(attr):
"""Lazy import deprecated private API."""
base_attrs = ("H0units_to_invs", "a_B_c2", "critdens_const", "kB_evK",
"radian_in_arcmin", "radian_in_arcsec", "sec_to_Gyr")
if attr in base_attrs + ("quad", ) + ("ellipkinc", "hyp2f1"):
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
from . import base, lambdacdm
msg = (f"`astropy.cosmology.flrw.{attr}` is a private variable (since "
"v5.1) and in future will raise an exception.")
warnings.warn(msg, AstropyDeprecationWarning)
if attr in base_attrs:
return getattr(base, "_" + attr)
elif attr == "quad":
return getattr(base, attr)
elif attr in ("ellipkinc", "hyp2f1"):
return getattr(lambdacdm, attr)
raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
|
b4e4109aaf23c28fb9e0fada49118e15bda26bc55ac2342cf7f12a2c20164023 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import annotations
import warnings
from abc import abstractmethod
from math import exp, floor, log, pi, sqrt
from numbers import Number
from typing import Any, Mapping, Optional, TypeVar
import numpy as np
from numpy import inf, sin
import astropy.constants as const
import astropy.units as u
from astropy.cosmology.core import Cosmology, FlatCosmologyMixin
from astropy.cosmology.parameter import Parameter, _validate_non_negative, _validate_with_unit
from astropy.cosmology.utils import aszarr, vectorize_redshift_method
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
# isort: split
if HAS_SCIPY:
from scipy.integrate import quad
else:
def quad(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.integrate'")
__all__ = ["FLRW", "FlatFLRWMixin"]
__doctest_requires__ = {'*': ['scipy']}
##############################################################################
# Parameters
# Some conversion constants -- useful to compute them once here and reuse in
# the initialization rather than have every object do them.
_H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
_sec_to_Gyr = u.s.to(u.Gyr)
# const in critical density in cgs units (g cm^-3)
_critdens_const = (3 / (8 * pi * const.G)).cgs.value
# angle conversions
_radian_in_arcsec = (1 * u.rad).to(u.arcsec)
_radian_in_arcmin = (1 * u.rad).to(u.arcmin)
# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
_a_B_c2 = (4 * const.sigma_sb / const.c ** 3).cgs.value
# Boltzmann constant in eV / K
_kB_evK = const.k_B.to(u.eV / u.K)
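# For orientation, approximate magnitudes of the constants above (illustrative
# values only; the definitions above are authoritative):
#   _H0units_to_invs  ~ 3.24e-20      (1 km/s/Mpc expressed in 1/s)
#   _sec_to_Gyr       ~ 3.17e-17      (1 s expressed in Gyr)
#   _critdens_const   ~ 1.79e6        (3 / (8 pi G) in g s^2 cm^-3)
#   _a_B_c2           ~ 8.4e-36       (4 sigma_sb / c^3 in g cm^-3 K^-4)
#   _kB_evK           ~ 8.617e-5 eV/K
#   _radian_in_arcsec ~ 2.06265e5 arcsec,  _radian_in_arcmin ~ 3437.75 arcmin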
# typing
_FLRWT = TypeVar("_FLRWT", bound="FLRW")
_FlatFLRWMixinT = TypeVar("_FlatFLRWMixinT", bound="FlatFLRWMixin")
##############################################################################
class FLRW(Cosmology):
"""
A class describing an isotropic and homogeneous
(Friedmann-Lemaitre-Robertson-Walker) cosmology.
This is an abstract base class -- you cannot instantiate examples of this
class, but must work with one of its subclasses, such as
:class:`~astropy.cosmology.LambdaCDM` or :class:`~astropy.cosmology.wCDM`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0. Note that this does not include massive
neutrinos.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
    Neff : float, optional
        Effective number of neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Notes
-----
Class instances are immutable -- you cannot change the parameters' values.
That is, all of the above attributes (except meta) are read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
H0 = Parameter(doc="Hubble constant as an `~astropy.units.Quantity` at z=0.",
unit="km/(s Mpc)", fvalidate="scalar")
Om0 = Parameter(doc="Omega matter; matter density/critical density at z=0.",
fvalidate="non-negative")
Ode0 = Parameter(doc="Omega dark energy; dark energy density/critical density at z=0.",
fvalidate="float")
Tcmb0 = Parameter(doc="Temperature of the CMB as `~astropy.units.Quantity` at z=0.",
unit="Kelvin", fvalidate="scalar")
Neff = Parameter(doc="Number of effective neutrino species.", fvalidate="non-negative")
m_nu = Parameter(doc="Mass of neutrino species.",
unit="eV", equivalencies=u.mass_energy())
Ob0 = Parameter(doc="Omega baryon; baryonic matter density/critical density at z=0.")
def __init__(self, H0, Om0, Ode0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
Ob0=None, *, name=None, meta=None):
super().__init__(name=name, meta=meta)
# Assign (and validate) Parameters
self.H0 = H0
self.Om0 = Om0
self.Ode0 = Ode0
self.Tcmb0 = Tcmb0
self.Neff = Neff
self.m_nu = m_nu # (reset later, this is just for unit validation)
self.Ob0 = Ob0 # (must be after Om0)
# Derived quantities:
# Dark matter density; matter - baryons, if latter is not None.
self._Odm0 = None if Ob0 is None else (self._Om0 - self._Ob0)
# 100 km/s/Mpc * h = H0 (so h is dimensionless)
self._h = self._H0.value / 100.0
# Hubble distance
self._hubble_distance = (const.c / self._H0).to(u.Mpc)
# H0 in s^-1
H0_s = self._H0.value * _H0units_to_invs
# Hubble time
self._hubble_time = (_sec_to_Gyr / H0_s) << u.Gyr
# Critical density at z=0 (grams per cubic cm)
cd0value = _critdens_const * H0_s ** 2
self._critical_density0 = cd0value << u.g / u.cm ** 3
# Compute photon density from Tcmb
self._Ogamma0 = _a_B_c2 * self._Tcmb0.value ** 4 / self._critical_density0.value
# Compute Neutrino temperature:
# The constant in front is (4/11)^1/3 -- see any cosmology book for an
# explanation -- for example, Weinberg 'Cosmology' p 154 eq (3.1.21).
self._Tnu0 = 0.7137658555036082 * self._Tcmb0
# Compute neutrino parameters:
if self._m_nu is None:
self._nneutrinos = 0
self._neff_per_nu = None
self._massivenu = False
self._massivenu_mass = None
self._nmassivenu = self._nmasslessnu = None
else:
self._nneutrinos = floor(self._Neff)
# We are going to share Neff between the neutrinos equally. In
# detail this is not correct, but it is a standard assumption
# because properly calculating it is a) complicated b) depends on
# the details of the massive neutrinos (e.g., their weak
# interactions, which could be unusual if one is considering
# sterile neutrinos).
self._neff_per_nu = self._Neff / self._nneutrinos
# Now figure out if we have massive neutrinos to deal with, and if
# so, get the right number of masses. It is worth keeping track of
# massless ones separately (since they are easy to deal with, and a
# common use case is to have only one massive neutrino).
massive = np.nonzero(self._m_nu.value > 0)[0]
self._massivenu = massive.size > 0
self._nmassivenu = len(massive)
self._massivenu_mass = self._m_nu[massive].value if self._massivenu else None
self._nmasslessnu = self._nneutrinos - self._nmassivenu
# Compute Neutrino Omega and total relativistic component for massive
# neutrinos. We also store a list version, since that is more efficient
# to do integrals with (perhaps surprisingly! But small python lists
# are more efficient than small NumPy arrays).
if self._massivenu: # (`_massivenu` set in `m_nu`)
nu_y = self._massivenu_mass / (_kB_evK * self._Tnu0)
self._nu_y = nu_y.value
self._nu_y_list = self._nu_y.tolist()
self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
else:
            # This case is particularly simple, so do it directly. The 0.2271...
# is 7/8 (4/11)^(4/3) -- the temperature bit ^4 (blackbody energy
# density) times 7/8 for FD vs. BE statistics.
self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
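            # Numerically, 7/8 * (4/11)**(4/3) = 0.22710731766..., which is
            # the hard-coded constant used on the line above.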
self._nu_y = self._nu_y_list = None
# Compute curvature density
self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0
# Subclasses should override this reference if they provide
# more efficient scalar versions of inv_efunc.
self._inv_efunc_scalar = self.inv_efunc
self._inv_efunc_scalar_args = ()
# ---------------------------------------------------------------
# Parameter details
@Ob0.validator
def Ob0(self, param, value):
"""Validate baryon density to None or positive float > matter density."""
if value is None:
return value
value = _validate_non_negative(self, param, value)
if value > self.Om0:
raise ValueError("baryonic density can not be larger than total matter density.")
return value
@m_nu.validator
def m_nu(self, param, value):
"""Validate neutrino masses to right value, units, and shape.
There are no neutrinos if floor(Neff) or Tcmb0 are 0.
The number of neutrinos must match floor(Neff).
Neutrino masses cannot be negative.
"""
# Check if there are any neutrinos
if (nneutrinos := floor(self._Neff)) == 0 or self._Tcmb0.value == 0:
return None # None, regardless of input
# Validate / set units
value = _validate_with_unit(self, param, value)
# Check values and data shapes
if value.shape not in ((), (nneutrinos,)):
raise ValueError("unexpected number of neutrino masses — "
f"expected {nneutrinos}, got {len(value)}.")
elif np.any(value.value < 0):
raise ValueError("invalid (negative) neutrino mass encountered.")
# scalar -> array
if value.isscalar:
value = np.full_like(value, value, shape=nneutrinos)
return value
# ---------------------------------------------------------------
# properties
@property
def is_flat(self):
"""Return bool; `True` if the cosmology is flat."""
return bool((self._Ok0 == 0.0) and (self.Otot0 == 1.0))
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return self._Om0 + self._Ogamma0 + self._Onu0 + self._Ode0 + self._Ok0
@property
def Odm0(self):
"""Omega dark matter; dark matter density/critical density at z=0."""
return self._Odm0
@property
def Ok0(self):
"""Omega curvature; the effective curvature density/critical density at z=0."""
return self._Ok0
@property
def Tnu0(self):
"""Temperature of the neutrino background as `~astropy.units.Quantity` at z=0."""
return self._Tnu0
@property
def has_massive_nu(self):
"""Does this cosmology have at least one massive neutrino species?"""
if self._Tnu0.value == 0:
return False
return self._massivenu
@property
def h(self):
"""Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]."""
return self._h
@property
def hubble_time(self):
"""Hubble time as `~astropy.units.Quantity`."""
return self._hubble_time
@property
def hubble_distance(self):
"""Hubble distance as `~astropy.units.Quantity`."""
return self._hubble_distance
@property
def critical_density0(self):
"""Critical density as `~astropy.units.Quantity` at z=0."""
return self._critical_density0
@property
def Ogamma0(self):
"""Omega gamma; the density/critical density of photons at z=0."""
return self._Ogamma0
@property
def Onu0(self):
"""Omega nu; the density/critical density of neutrinos at z=0."""
return self._Onu0
# ---------------------------------------------------------------
@abstractmethod
def w(self, z):
r"""The dark energy equation of state.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
`float` if scalar input.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1.
This must be overridden by subclasses.
"""
raise NotImplementedError("w(z) is not implemented")
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
The total density relative to the critical density at each redshift.
Returns float if input scalar.
"""
return self.Om(z) + self.Ogamma(z) + self.Onu(z) + self.Ode(z) + self.Ok(z)
def Om(self, z):
"""
Return the density parameter for non-relativistic matter
at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Om : ndarray or float
The density of non-relativistic matter relative to the critical
density at each redshift.
Returns `float` if the input is scalar.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest; see `Onu`.
"""
z = aszarr(z)
return self._Om0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ob(self, z):
"""Return the density parameter for baryonic matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ob : ndarray or float
The density of baryonic matter relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
"""
if self._Ob0 is None:
raise ValueError("Baryon density not set for this cosmology")
z = aszarr(z)
return self._Ob0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Odm(self, z):
"""Return the density parameter for dark matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Odm : ndarray or float
The density of non-relativistic dark matter relative to the
critical density at each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest.
"""
if self._Odm0 is None:
raise ValueError("Baryonic density not set for this cosmology, "
"unclear meaning of dark matter density")
z = aszarr(z)
return self._Odm0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ok(self, z):
"""
Return the equivalent density parameter for curvature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ok : ndarray or float
The equivalent density parameter for curvature at each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ok0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ok0 * (z + 1.0) ** 2 * self.inv_efunc(z) ** 2
def Ode(self, z):
"""Return the density parameter for dark energy at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ode : ndarray or float
The density of dark energy relative to the critical density at each
redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ode0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
def Ogamma(self, z):
"""Return the density parameter for photons at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ogamma : ndarray or float
The energy density of photons relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
return self._Ogamma0 * (z + 1.0) ** 4 * self.inv_efunc(z) ** 2
def Onu(self, z):
r"""Return the density parameter for neutrinos at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Onu : ndarray or float
The energy density of neutrinos relative to the critical density at
each redshift. Note that this includes their kinetic energy (if
they have mass), so it is not equal to the commonly used
:math:`\sum \frac{m_{\nu}}{94 eV}`, which does not include
kinetic energy.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Onu0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self.Ogamma(z) * self.nu_relative_density(z)
def Tcmb(self, z):
"""Return the CMB temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tcmb : `~astropy.units.Quantity` ['temperature']
The temperature of the CMB in K.
"""
return self._Tcmb0 * (aszarr(z) + 1.0)
def Tnu(self, z):
"""Return the neutrino temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tnu : `~astropy.units.Quantity` ['temperature']
The temperature of the cosmic neutrino background in K.
"""
return self._Tnu0 * (aszarr(z) + 1.0)
def nu_relative_density(self, z):
r"""Neutrino density function relative to the energy density in photons.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
f : ndarray or float
The neutrino density scaling factor relative to the density in
photons at each redshift.
Only returns `float` if z is scalar.
Notes
-----
The density in neutrinos is given by
.. math::
\rho_{\nu} \left(a\right) = 0.2271 \, N_{eff} \,
f\left(m_{\nu} a / T_{\nu 0} \right) \,
\rho_{\gamma} \left( a \right)
where
.. math::
f \left(y\right) = \frac{120}{7 \pi^4}
\int_0^{\infty} \, dx \frac{x^2 \sqrt{x^2 + y^2}}
{e^x + 1}
assuming that all neutrino species have the same mass.
If they have different masses, a similar term is calculated for each
one. Note that ``f`` has the asymptotic behavior :math:`f(0) = 1`. This
method returns :math:`0.2271 f` using an analytical fitting formula
given in Komatsu et al. 2011, ApJS 192, 18.
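Examples
--------
A minimal sketch with one massive neutrino species; the subclass and
parameter values below are illustrative assumptions, not requirements of
this method:
>>> import astropy.units as u
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725 * u.K,
... m_nu=[0.0, 0.0, 0.06] * u.eV)
>>> f = cosmo.nu_relative_density(1.0)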
"""
# Note that there is also a scalar-z-only cython implementation of
# this in scalar_inv_efuncs.pyx, so if you find a problem in this
# you need to update there too.
# See Komatsu et al. 2011, eq 26 and the surrounding discussion
# for an explanation of what we are doing here.
# However, this is modified to handle multiple neutrino masses
# by computing the above for each mass, then summing
prefac = 0.22710731766 # 7/8 (4/11)^4/3 -- see any cosmo book
# The massive and massless contribution must be handled separately
# But check for common cases first
z = aszarr(z)
if not self._massivenu:
return prefac * self._Neff * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
# These are purely fitting constants -- see the Komatsu paper
p = 1.83
invp = 0.54644808743 # 1.0 / p
k = 0.3173
curr_nu_y = self._nu_y / (1. + np.expand_dims(z, axis=-1))
rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp
rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu
return prefac * self._neff_per_nu * rel_mass
def _w_integrand(self, ln1pz):
"""Internal convenience function for w(z) integral (eq. 5 of [1]_).
Parameters
----------
ln1pz : `~numbers.Number` or scalar ndarray
Assumes scalar input, since this should only be called inside an
integral.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
return 1.0 + self.w(exp(ln1pz) - 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and is given by
.. math::
I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} }
\left[ 1 + w\left( a^{\prime} \right) \right] \right)
The actual integral used is rewritten from [1]_ to be in terms of z.
It will generally be helpful for subclasses to overload this method if
the integral can be done analytically for the particular dark
energy equation of state that they implement.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
# This allows for an arbitrary w(z) following eq (5) of
# Linder 2003, PRL 90, 91301. The code here evaluates
# the integral numerically. However, most popular
# forms of w(z) are designed to make this integral analytic,
# so it is probably a good idea for subclasses to overload this
# method if an analytic form is available.
z = aszarr(z)
if not isinstance(z, (Number, np.generic)): # array/Quantity
ival = np.array([quad(self._w_integrand, 0, log(1 + redshift))[0]
for redshift in z])
return np.exp(3 * ival)
else: # scalar
ival = quad(self._w_integrand, 0, log(z + 1.0))[0]
return exp(3 * ival)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
Notes
-----
It is not necessary to override this method, but if de_density_scale
takes a particularly simple form, it may be advantageous to.
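Examples
--------
A minimal sketch of the relation :math:`H(z) = H_0 E(z)`; the subclass and
parameter values below are illustrative assumptions:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> E = cosmo.efunc(1.0)
>>> Hz = cosmo.H0 * E  # same as cosmo.H(1.0)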
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
self._Ode0 * self.de_density_scale(z))
def inv_efunc(self, z):
"""Inverse of ``efunc``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the inverse Hubble constant.
Returns `float` if the input is scalar.
"""
# Avoid the function overhead by repeating code
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
self._Ode0 * self.de_density_scale(z))**(-0.5)
def _lookback_time_integrand_scalar(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : float
Input redshift.
Returns
-------
I : float
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
return self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args) / (z + 1.0)
def lookback_time_integrand(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : float or array
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return self.inv_efunc(z) / (z + 1.0)
def _abs_distance_integrand_scalar(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
args = self._inv_efunc_scalar_args
return (z + 1.0) ** 2 * self._inv_efunc_scalar(z, *args)
def abs_distance_integrand(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float or array
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return (z + 1.0) ** 2 * self.inv_efunc(z)
def H(self, z):
"""Hubble parameter (km/s/Mpc) at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
H : `~astropy.units.Quantity` ['frequency']
Hubble parameter at each input redshift.
"""
return self._H0 * self.efunc(z)
def scale_factor(self, z):
"""Scale factor at redshift ``z``.
The scale factor is defined as :math:`a = 1 / (1 + z)`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
a : ndarray or float
Scale factor at each input redshift.
Returns `float` if the input is scalar.
"""
return 1.0 / (aszarr(z) + 1.0)
def lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a lookback time.
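Examples
--------
A minimal sketch; the subclass and parameter values below are illustrative
assumptions. The lookback time equals ``age(0) - age(z)``:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> t_lb = cosmo.lookback_time(1.0)
>>> t_lb_check = cosmo.age(0) - cosmo.age(1.0)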
"""
return self._lookback_time(z)
def _lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * self._integral_lookback_time(z)
@vectorize_redshift_method
def _integral_lookback_time(self, z, /):
"""Lookback time to redshift ``z``. Value in units of Hubble time.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
Lookback time to each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._lookback_time_integrand_scalar, 0, z)[0]
def lookback_distance(self, z):
"""
The lookback distance is the light travel time distance to a given
redshift. It is simply c * lookback_time. It may be used to calculate
the proper distance between two redshifts, e.g. for the mean free path
to ionizing radiation.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Lookback distance in Mpc
"""
return (self.lookback_time(z) * const.c).to(u.Mpc)
def age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return self._age(z)
def _age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
This internal function exists to be re-defined for optimizations.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
return self._hubble_time * self._integral_age(z)
@vectorize_redshift_method
def _integral_age(self, z, /):
"""Age of the universe at redshift ``z``. Value in units of Hubble time.
Calculated using explicit integration.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
The age of the universe at each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return quad(self._lookback_time_integrand_scalar, z, inf)[0]
def critical_density(self, z):
"""Critical density in grams per cubic cm at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
rho : `~astropy.units.Quantity`
Critical density in g/cm^3 at each input redshift.
"""
return self._critical_density0 * (self.efunc(z)) ** 2
def comoving_distance(self, z):
"""Comoving line-of-sight distance in Mpc at a given redshift.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc to each input redshift.
"""
return self._comoving_distance_z1z2(0, z)
def _comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._integral_comoving_distance_z1z2(z1, z2)
@vectorize_redshift_method(nin=2)
def _integral_comoving_distance_z1z2_scalar(self, z1, z2, /):
"""
Comoving line-of-sight distance between objects at redshifts ``z1`` and
``z2``. Value in Mpc.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : float or ndarray
Comoving distance in Mpc between each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._inv_efunc_scalar, z1, z2, args=self._inv_efunc_scalar_args)[0]
def _integral_comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``. The comoving distance along the line-of-sight
between two objects remains constant with time for objects in the
Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'] or array-like
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2)
def comoving_transverse_distance(self, z):
r"""Comoving transverse distance in Mpc at a given redshift.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is the same as
the comoving distance if :math:`\Omega_k` is zero (as in the current
concordance Lambda-CDM model).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc at each input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
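Examples
--------
A minimal sketch; the subclass and parameter values below are illustrative
assumptions. For a flat cosmology this equals the comoving distance:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> dm = cosmo.comoving_transverse_distance(1.0)
>>> dc = cosmo.comoving_distance(1.0)  # equal here because Ok0 == 0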
"""
return self._comoving_transverse_distance_z1z2(0, z)
def _comoving_transverse_distance_z1z2(self, z1, z2):
r"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z2`` as
seen from redshift ``z1`` corresponding to an angular separation of
1 radian. This is the same as the comoving distance if :math:`\Omega_k`
is zero (as in the current concordance Lambda-CDM model).
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc between the input redshifts.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
Ok0 = self._Ok0
dc = self._comoving_distance_z1z2(z1, z2)
if Ok0 == 0:
return dc
sqrtOk0 = sqrt(abs(Ok0))
dh = self._hubble_distance
if Ok0 > 0:
return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
else:
return dh / sqrtOk0 * np.sin(sqrtOk0 * dc.value / dh.value)
def angular_diameter_distance(self, z):
"""Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object
at redshift ``z`` ([1]_, [2]_, [3]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Angular diameter distance in Mpc at each input redshift.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 421-424.
.. [2] Weedman, D. (1986). Quasar astronomy, pp 65-67.
.. [3] Peebles, P. (1993). Principles of Physical Cosmology, pp 325-327.
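Examples
--------
A minimal sketch of the relation :math:`d_A = d_M / (1 + z)`; the subclass
and parameter values below are illustrative assumptions:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> da = cosmo.angular_diameter_distance(1.0)
>>> da_check = cosmo.comoving_transverse_distance(1.0) / (1.0 + 1.0)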
"""
z = aszarr(z)
return self.comoving_transverse_distance(z) / (z + 1.0)
def luminosity_distance(self, z):
"""Luminosity distance in Mpc at redshift ``z``.
This is the distance to use when converting between the bolometric flux
from an object at redshift ``z`` and its bolometric luminosity [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Luminosity distance in Mpc at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a luminosity distance.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
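Examples
--------
A minimal sketch of the relation :math:`d_L = (1 + z) d_M`; the subclass
and parameter values below are illustrative assumptions:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> dl = cosmo.luminosity_distance(1.0)
>>> dl_check = (1.0 + 1.0) * cosmo.comoving_transverse_distance(1.0)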
"""
z = aszarr(z)
return (z + 1.0) * self.comoving_transverse_distance(z)
def angular_diameter_distance_z1z2(self, z1, z2):
"""Angular diameter distance between objects at 2 redshifts.
Useful for gravitational lensing, for example computing the angular
diameter distance between a lensed galaxy and the foreground lens.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. For most practical applications such as
gravitational lensing, ``z2`` should be larger than ``z1``. The
method will work for ``z2 < z1``; however, this will return
negative distances.
Returns
-------
d : `~astropy.units.Quantity`
The angular diameter distance between each input redshift pair.
Returns scalar if the input is scalar, array otherwise.
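Examples
--------
A minimal lensing-style sketch; the subclass, parameter values, and
redshifts below are illustrative assumptions:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> d_ls = cosmo.angular_diameter_distance_z1z2(0.5, 2.0)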
"""
z1, z2 = aszarr(z1), aszarr(z2)
if np.any(z2 < z1):
warnings.warn(f"Second redshift(s) z2 ({z2}) is less than first "
f"redshift(s) z1 ({z1}).", AstropyUserWarning)
return self._comoving_transverse_distance_z1z2(z1, z2) / (z2 + 1.0)
@vectorize_redshift_method
def absorption_distance(self, z, /):
"""Absorption distance at redshift ``z``.
This is used to calculate the number of objects with some cross section
of absorption and number density intersecting a sightline per unit
redshift path ([1]_, [2]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : float or ndarray
Absorption distance (dimensionless) at each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
.. [2] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
"""
return quad(self._abs_distance_integrand_scalar, 0, z)[0]
def distmod(self, z):
"""Distance modulus at redshift ``z``.
The distance modulus is defined as the (apparent magnitude - absolute
magnitude) for an object at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
distmod : `~astropy.units.Quantity`
Distance modulus at each input redshift, in magnitudes.
See Also
--------
z_at_value : Find the redshift corresponding to a distance modulus.
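Examples
--------
A minimal sketch; the subclass and parameter values below are illustrative
assumptions. The distance modulus follows from the luminosity distance as
5 log10(d_L / 10 pc):
>>> import numpy as np
>>> import astropy.units as u
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> mu = cosmo.distmod(0.5)
>>> dl = cosmo.luminosity_distance(0.5)
>>> mu_check = 5 * np.log10(dl.to_value(u.pc) / 10) * u.mag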
"""
# Remember that the luminosity distance is in Mpc
# Abs is necessary because in certain obscure closed cosmologies
# the distance modulus can be negative -- which is okay because
# it enters as the square.
val = 5. * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
return u.Quantity(val, u.mag)
def comoving_volume(self, z):
r"""Comoving volume in cubic Mpc at redshift ``z``.
This is the volume of the universe encompassed by redshifts less than
``z``. For the case of :math:`\Omega_k = 0` it is a sphere of radius
`comoving_distance` but it is less intuitive if :math:`\Omega_k` is not.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
V : `~astropy.units.Quantity`
Comoving volume in :math:`Mpc^3` at each input redshift.
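Examples
--------
A minimal sketch; the subclass and parameter values below are illustrative
assumptions. For a flat cosmology the volume reduces to
:math:`\frac{4}{3}\pi d_C^3`:
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
>>> vc = cosmo.comoving_volume(1.0)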
"""
Ok0 = self._Ok0
if Ok0 == 0:
return 4.0 / 3.0 * pi * self.comoving_distance(z) ** 3
dh = self._hubble_distance.value # .value for speed
dm = self.comoving_transverse_distance(z).value
term1 = 4.0 * pi * dh ** 3 / (2.0 * Ok0) * u.Mpc ** 3
term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
term3 = sqrt(abs(Ok0)) * dm / dh
if Ok0 > 0:
return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsinh(term3))
else:
return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsin(term3))
def differential_comoving_volume(self, z):
"""Differential comoving volume at redshift z.
Useful for calculating the effective comoving volume.
For example, allows for integration over a comoving volume that has a
sensitivity function that changes with redshift. The total comoving
volume is given by integrating ``differential_comoving_volume`` to
redshift ``z`` and multiplying by a solid angle.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
dV : `~astropy.units.Quantity`
Differential comoving volume per redshift per steradian at each
input redshift.
"""
dm = self.comoving_transverse_distance(z)
return self._hubble_distance * (dm ** 2.0) / (self.efunc(z) << u.steradian)
def kpc_comoving_per_arcmin(self, z):
"""
Separation in transverse comoving kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in comoving kpc corresponding to an arcmin at each
input redshift.
"""
return self.comoving_transverse_distance(z).to(u.kpc) / _radian_in_arcmin
def kpc_proper_per_arcmin(self, z):
"""
Separation in transverse proper kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in proper kpc corresponding to an arcmin at each input
redshift.
"""
return self.angular_diameter_distance(z).to(u.kpc) / _radian_in_arcmin
def arcsec_per_kpc_comoving(self, z):
"""
Angular separation in arcsec corresponding to a comoving kpc at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a comoving kpc at
each input redshift.
"""
return _radian_in_arcsec / self.comoving_transverse_distance(z).to(u.kpc)
def arcsec_per_kpc_proper(self, z):
"""
Angular separation in arcsec corresponding to a proper kpc at redshift
``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a proper kpc at
each input redshift.
"""
return _radian_in_arcsec / self.angular_diameter_distance(z).to(u.kpc)
class FlatFLRWMixin(FlatCosmologyMixin):
"""
Mixin class for flat FLRW cosmologies. Do NOT instantiate directly.
Must precede the base class in the multiple-inheritance order so that this
mixin's ``__init__`` is called before the base class's.
Note that all instances of ``FlatFLRWMixin`` are flat, but not all
flat cosmologies are instances of ``FlatFLRWMixin``. For example,
``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
Ode0 = FLRW.Ode0.clone(derived=True) # same as FLRW, but now a derived param.
def __init_subclass__(cls):
super().__init_subclass__()
if "Ode0" in cls._init_signature.parameters:
raise TypeError("subclasses of `FlatFLRWMixin` cannot have `Ode0` in `__init__`")
def __init__(self, *args, **kw):
super().__init__(*args, **kw) # guaranteed not to have `Ode0`
# Do some twiddling after the fact to get flatness
self._Ok0 = 0.0
self._Ode0 = 1.0 - (self._Om0 + self._Ogamma0 + self._Onu0 + self._Ok0)
@lazyproperty
def nonflat(self: _FlatFLRWMixinT) -> _FLRWT:
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self._nonflat_cls_._init_signature.bind_partial(**self._init_arguments,
Ode0=self.Ode0)
# Make new instance, respecting args vs kwargs
inst = self._nonflat_cls_(*ba.args, **ba.kwargs)
# Because of machine precision, make sure parameters exactly match
for n in inst.__all_parameters__ + ("Ok0", ):
setattr(inst, "_" + n, getattr(self, n))
return inst
def clone(self, *, meta: Optional[Mapping] = None, to_nonflat: bool = None, **kwargs: Any):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, except for
changing to the non-flat version of this cosmology.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
to_nonflat : bool or None, optional keyword-only
Whether to change to the non-flat version of this cosmology.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
returned instead of copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
The keyword 'to_nonflat' can be used to clone on the non-flat equivalent
cosmology.
>>> Planck13.clone(to_nonflat=True)
LambdaCDM(name="Planck13", ...
>>> Planck13.clone(H0=70, to_nonflat=True)
LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ...
With 'to_nonflat' `True`, ``Ode0`` can be modified.
>>> Planck13.clone(to_nonflat=True, Ode0=1)
LambdaCDM(name="Planck13 (modified)", H0=67.77 km / (Mpc s),
Om0=0.30712, Ode0=1.0, ...
"""
return super().clone(meta=meta, to_nonflat=to_nonflat, **kwargs)
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return 1.0
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
Returns `float` if the input is scalar. The value is always 1.
"""
return 1.0 if isinstance(z, (Number, np.generic)) else np.ones_like(z, subok=False)
def __equiv__(self, other):
"""flat-FLRW equivalence. Use ``.is_equivalent()`` for actual check!
Parameters
----------
other : `~astropy.cosmology.FLRW` subclass instance
The object in which to compare.
Returns
-------
bool or `NotImplemented`
`True` if 'other' is of the same class / non-flat class (e.g.
``FlatLambdaCDM`` and ``LambdaCDM``) and has matching parameters
and parameter values. `False` if 'other' is of the same class but
has different parameters. `NotImplemented` otherwise.
"""
# check if case (1): same class & parameters
if isinstance(other, FlatFLRWMixin):
return super().__equiv__(other)
# check cases (3, 4), if other is the non-flat version of this class
# this makes the assumption that any further subclass of a flat cosmo
# keeps the same physics.
comparable_classes = [c for c in self.__class__.mro()[1:]
if (issubclass(c, FLRW) and c is not FLRW)]
if other.__class__ not in comparable_classes:
return NotImplemented
# check if have equivalent parameters
# check that all parameters in 'other' match those in 'self', that 'other'
# has no extra parameters (case (2)) except for 'Ode0', and that 'other' is flat
params_eq = (
set(self.__all_parameters__) == set(other.__all_parameters__) # no extra
and all(np.all(getattr(self, k) == getattr(other, k)) # equal
for k in self.__parameters__)
and other.is_flat
)
return params_eq
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy import sqrt
import astropy.units as u
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.utils import aszarr
from . import scalar_inv_efuncs
from .base import FLRW, FlatFLRWMixin
__all__ = ["wCDM", "FlatwCDM"]
__doctest_requires__ = {'*': ['scipy']}
class wCDM(FLRW):
"""
FLRW cosmology with a constant dark energy equation of state and curvature.
This has one additional attribute beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at all redshifts. This is
pressure/density for dark energy in units where c=1. A cosmological
constant has w0=-1.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import wCDM
>>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
w0 = Parameter(doc="Dark energy equation of state.", fvalidate="float")
def __init__(self, H0, Om0, Ode0, w0=-1.0, Tcmb0=0.0*u.K, Neff=3.04,
m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
self.w0 = w0
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._w0)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.wcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is :math:`w(z) = w_0`.
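Examples
--------
A minimal sketch; the parameter values below are illustrative assumptions.
The equation of state is constant, so ``w(z)`` returns ``w0`` at any
redshift:
>>> from astropy.cosmology import wCDM
>>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)
>>> w_scalar = cosmo.w(0.5)  # -0.9
>>> w_array = cosmo.w([0.0, 1.0, 2.0])  # array of -0.9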
"""
z = aszarr(z)
return self._w0 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
:math:`I = \left(1 + z\right)^{3\left(1 + w_0\right)}`
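Examples
--------
A minimal sketch; the parameter values below are illustrative assumptions.
With ``w0 = -0.9``, the scaling at ``z = 1`` is
``2 ** (3 * (1 + w0)) = 2 ** 0.3``:
>>> from astropy.cosmology import wCDM
>>> cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9)
>>> I = cosmo.de_density_scale(1.0)  # approximately 2 ** 0.3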
"""
return (aszarr(z) + 1.0) ** (3.0 * (1. + self._w0))
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
self._Ode0 * zp1 ** (3. * (1. + self._w0)))
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
self._Ode0 * zp1 ** (3. * (1. + self._w0)))**(-0.5)
class FlatwCDM(FlatFLRWMixin, wCDM):
"""
FLRW cosmology with a constant dark energy equation of state and no spatial
curvature.
This has one additional attribute beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float, optional
Dark energy equation of state at all redshifts. This is
pressure/density for dark energy in units where c=1. A cosmological
constant has w0=-1.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import FlatwCDM
>>> cosmo = FlatwCDM(H0=70, Om0=0.3, w0=-0.9)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
To get an equivalent cosmology, but of type `astropy.cosmology.wCDM`,
use :attr:`astropy.cosmology.FlatFLRWMixin.nonflat`.
>>> cosmo.nonflat
wCDM(H0=70.0 km / (Mpc s), Om0=0.3, ...
"""
def __init__(self, H0, Om0, w0=-1.0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=0.0, w0=w0, Tcmb0=Tcmb0,
Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._w0)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0 + self._Onu0,
self._w0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.fwcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return sqrt(zp1 ** 3 * (Or * zp1 + self._Om0) + self._Ode0 * zp1 ** (3.0 * (1 + self._w0)))
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 3 * (Or * zp1 + self._Om0) +
self._Ode0 * zp1 ** (3. * (1. + self._w0)))**(-0.5)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import acos, cos, inf, sin, sqrt
from numbers import Number
import numpy as np
from numpy import log
import astropy.units as u
from astropy.cosmology.utils import aszarr
from astropy.utils.compat.optional_deps import HAS_SCIPY
from . import scalar_inv_efuncs
from .base import FLRW, FlatFLRWMixin
# isort: split
if HAS_SCIPY:
from scipy.special import ellipkinc, hyp2f1
else:
def ellipkinc(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.special'")
def hyp2f1(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.special'")
__all__ = ["LambdaCDM", "FlatLambdaCDM"]
__doctest_requires__ = {'*': ['scipy']}
class LambdaCDM(FLRW):
"""FLRW cosmology with a cosmological constant and curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of the cosmological constant in units of
the critical density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import LambdaCDM
>>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0)
if self._Ok0 == 0:
self._optimize_flat_norad()
else:
self._comoving_distance_z1z2 = self._elliptic_comoving_distance_z1z2
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.lcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list)
def _optimize_flat_norad(self):
"""Set optimizations for flat LCDM cosmologies with no radiation."""
# Call out the Om0=0 (de Sitter) and Om0=1 (Einstein-de Sitter)
# The dS case is required because the hypergeometric case
# for Omega_M=0 would lead to an infinity in its argument.
# The EdS case is three times faster than the hypergeometric.
if self._Om0 == 0:
self._comoving_distance_z1z2 = self._dS_comoving_distance_z1z2
self._age = self._dS_age
self._lookback_time = self._dS_lookback_time
elif self._Om0 == 1:
self._comoving_distance_z1z2 = self._EdS_comoving_distance_z1z2
self._age = self._EdS_age
self._lookback_time = self._EdS_lookback_time
else:
self._comoving_distance_z1z2 = self._hypergeometric_comoving_distance_z1z2
self._age = self._flat_age
self._lookback_time = self._flat_lookback_time
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is :math:`w(z) = -1`.
"""
z = aszarr(z)
return -1.0 * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by :math:`I = 1`.
"""
z = aszarr(z)
return np.ones(z.shape) if hasattr(z, "shape") else 1.0
def _elliptic_comoving_distance_z1z2(self, z1, z2):
r"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is the same as
the comoving distance if :math:`\Omega_k` is zero.
For :math:`\Omega_{rad} = 0` the comoving distance can be directly
calculated as an elliptic integral [1]_.
Not valid or appropriate for flat cosmologies (Ok0=0).
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
References
----------
.. [1] Kantowski, R., Kao, J., & Thomas, R. (2000). Distance-Redshift
in Inhomogeneous FLRW. arXiv e-prints, astro-ph/0002334.
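Examples
--------
A minimal sketch; the parameter values below are illustrative assumptions.
When radiation is absent (``Tcmb0 = 0``) and the cosmology is curved,
``comoving_distance`` dispatches to this elliptic form; the general
integral version agrees with it to numerical precision:
>>> from astropy.cosmology import LambdaCDM
>>> cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.6)  # Ok0 = 0.1
>>> dc = cosmo.comoving_distance(1.0)
>>> dc_check = cosmo._integral_comoving_distance_z1z2(0.0, 1.0)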
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
# The analytic solution is not valid for any of Om0, Ode0, Ok0 == 0.
# Use the explicit integral solution for these cases.
if self._Om0 == 0 or self._Ode0 == 0 or self._Ok0 == 0:
return self._integral_comoving_distance_z1z2(z1, z2)
b = -(27. / 2) * self._Om0**2 * self._Ode0 / self._Ok0**3
kappa = b / abs(b)
if (b < 0) or (2 < b):
def phi_z(Om0, Ok0, kappa, y1, A, z):
return np.arccos(((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 - A) /
((z + 1.0) * Om0 / abs(Ok0) + kappa * y1 + A))
v_k = pow(kappa * (b - 1) + sqrt(b * (b - 2)), 1. / 3)
y1 = (-1 + kappa * (v_k + 1 / v_k)) / 3
A = sqrt(y1 * (3 * y1 + 2))
g = 1 / sqrt(A)
k2 = (2 * A + kappa * (1 + 3 * y1)) / (4 * A)
phi_z1 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z1)
phi_z2 = phi_z(self._Om0, self._Ok0, kappa, y1, A, z2)
# Get lower-right 0<b<2 solution in Om0, Ode0 plane.
# For the upper-left 0<b<2 solution the Big Bang didn't happen.
elif (0 < b) and (b < 2) and self._Om0 > self._Ode0:
def phi_z(Om0, Ok0, y1, y2, z):
return np.arcsin(np.sqrt((y1 - y2) /
((z + 1.0) * Om0 / abs(Ok0) + y1)))
yb = cos(acos(1 - b) / 3)
yc = sqrt(3) * sin(acos(1 - b) / 3)
y1 = (1. / 3) * (-1 + yb + yc)
y2 = (1. / 3) * (-1 - 2 * yb)
y3 = (1. / 3) * (-1 + yb - yc)
g = 2 / sqrt(y1 - y2)
k2 = (y1 - y3) / (y1 - y2)
phi_z1 = phi_z(self._Om0, self._Ok0, y1, y2, z1)
phi_z2 = phi_z(self._Om0, self._Ok0, y1, y2, z2)
else:
return self._integral_comoving_distance_z1z2(z1, z2)
prefactor = self._hubble_distance / sqrt(abs(self._Ok0))
return prefactor * g * (ellipkinc(phi_z1, k2) - ellipkinc(phi_z2, k2))
def _dS_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2`` in a flat, :math:`\Omega_{\Lambda}=1` cosmology
(de Sitter).
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
The de Sitter case has an analytic solution.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
return self._hubble_distance * (z2 - z1)
def _EdS_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2`` in a flat, :math:`\Omega_M=1` cosmology
(Einstein - de Sitter).
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
For :math:`\Omega_M=1`, :math:`\Omega_{rad}=0` the comoving distance
has an analytic solution.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. Must be 1D or scalar.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
prefactor = 2 * self._hubble_distance
return prefactor * ((z1 + 1.0)**(-1./2) - (z2 + 1.0)**(-1./2))
def _hypergeometric_comoving_distance_z1z2(self, z1, z2):
r"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
For :math:`\Omega_{rad} = 0` the comoving distance can be directly
calculated as a hypergeometric function [1]_.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
References
----------
.. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical
expressions and numerical evaluation of the luminosity distance
in a flat cosmology. MNRAS, 468(1), 927-930.
"""
try:
z1, z2 = np.broadcast_arrays(z1, z2)
except ValueError as e:
raise ValueError("z1 and z2 have different shapes") from e
s = ((1 - self._Om0) / self._Om0) ** (1./3)
# Use np.sqrt here to handle negative s (Om0>1).
prefactor = self._hubble_distance / np.sqrt(s * self._Om0)
return prefactor * (self._T_hypergeometric(s / (z1 + 1.0)) -
self._T_hypergeometric(s / (z2 + 1.0)))
def _T_hypergeometric(self, x):
r"""Compute value using Gauss Hypergeometric function 2F1.
.. math::
T(x) = 2 \sqrt{x} \, {}_{2}F_{1}\left(\frac{1}{6}, \frac{1}{2}; \frac{7}{6}; -x^3\right)
Notes
-----
The :func:`scipy.special.hyp2f1` code already implements the
hypergeometric transformation suggested by Baes et al. [1]_ for use in
actual numerical evaluations.
References
----------
.. [1] Baes, M., Camps, P., & Van De Putte, D. (2017). Analytical
expressions and numerical evaluation of the luminosity distance
in a flat cosmology. MNRAS, 468(1), 927-930.
"""
return 2 * np.sqrt(x) * hyp2f1(1./6, 1./2, 7./6, -x**3)
def _dS_age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
The age of a de Sitter Universe is infinite.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
t = (inf if isinstance(z, Number) else np.full_like(z, inf, dtype=float))
return self._hubble_time * t
def _EdS_age(self, z):
r"""Age of the universe in Gyr at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated as an elliptic integral [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
References
----------
.. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for
standard cosmology. PRD, 62(10), 103507.
"""
return (2./3) * self._hubble_time * (aszarr(z) + 1.0) ** (-1.5)
def _flat_age(self, z):
r"""Age of the universe in Gyr at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated as an elliptic integral [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
References
----------
.. [1] Thomas, R., & Kantowski, R. (2000). Age-redshift relation for
standard cosmology. PRD, 62(10), 103507.
"""
# Use np.sqrt, np.arcsinh instead of math.sqrt, math.asinh
# to handle properly the complex numbers for 1 - Om0 < 0
prefactor = (2./3) * self._hubble_time / np.emath.sqrt(1 - self._Om0)
arg = np.arcsinh(np.emath.sqrt((1 / self._Om0 - 1 + 0j) / (aszarr(z) + 1.0)**3))
return (prefactor * arg).real
def _EdS_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated as an elliptic integral.
The lookback time is here calculated based on the ``age(0) - age(z)``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._EdS_age(0) - self._EdS_age(z)
def _dS_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated.
.. math::
a = \exp(H t), \quad \text{with } t = 0 \text{ at } z = 0,
t = \frac{1}{H} \left( \ln 1 - \ln a \right) = \frac{1}{H} \ln(1 + z)
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * log(aszarr(z) + 1.0)
def _flat_lookback_time(self, z):
r"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
For :math:`\Omega_{rad} = 0` (:math:`T_{CMB} = 0`; massless neutrinos)
the age can be directly calculated.
The lookback time is here calculated based on the ``age(0) - age(z)``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._flat_age(0) - self._flat_age(z)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
# We override this because it takes a particularly simple
# form for a cosmological constant
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0)**(-0.5)
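# Illustrative sketch (a hypothetical helper, not part of astropy's API): the
# Horner-style expression used in ``LambdaCDM.efunc`` above, written out as a
# standalone function so the polynomial form is explicit,
# E(z)^2 = Or0 (1+z)^4 + Om0 (1+z)^3 + Ok0 (1+z)^2 + Ode0.
def _sketch_lcdm_efunc(z, Om0, Ode0, Ok0=0.0, Or0=0.0):
    zp1 = np.asarray(z) + 1.0
    return np.sqrt(zp1 ** 2 * ((Or0 * zp1 + Om0) * zp1 + Ok0) + Ode0)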
class FlatLambdaCDM(FlatFLRWMixin, LambdaCDM):
"""FLRW cosmology with a cosmological constant and no curvature.
This has no additional attributes beyond those of FLRW.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import FlatLambdaCDM
>>> cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
To get an equivalent cosmology, but of type `astropy.cosmology.LambdaCDM`,
use :attr:`astropy.cosmology.FlatFLRWMixin.nonflat`.
>>> cosmo.nonflat
LambdaCDM(H0=70.0 km / (Mpc s), Om0=0.3, ...
"""
def __init__(self, H0, Om0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=0.0, Tcmb0=Tcmb0, Neff=Neff,
m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0)
# Repeat the optimization reassignments here because the init
            # of the LambdaCDM above didn't actually create a flat cosmology.
# That was done through the explicit tweak setting self._Ok0.
self._optimize_flat_norad()
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0 + self._Onu0)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.flcdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list)
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
"""
# We override this because it takes a particularly simple
# form for a cosmological constant
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1 ** 3 * (Or * zp1 + self._Om0) + self._Ode0)
def inv_efunc(self, z):
r"""Function used to calculate :math:`\frac{1}{H_z}`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The inverse redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H_z = H_0 / E`.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 3 * (Or * zp1 + self._Om0) + self._Ode0)**(-0.5)
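# Minimal cross-check sketch (hypothetical helper, not part of astropy's API or
# tests): for a flat cosmology with Tcmb0 = 0 (no radiation contribution),
# ``efunc`` should reduce to sqrt(Om0 (1+z)^3 + Ode0).
def _sketch_check_flat_efunc(z=1.0, Om0=0.3):
    cosmo = FlatLambdaCDM(H0=70, Om0=Om0)
    expected = np.sqrt(Om0 * (1.0 + z) ** 3 + cosmo.Ode0)
    return np.isclose(cosmo.efunc(z), expected)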
|
a78d80aab99d48fc89ecf0edc15ca7390d4b0c13426398ce19c9bffeb925c8d3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy import exp
import astropy.units as u
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.utils import aszarr
from . import scalar_inv_efuncs
from .base import FLRW, FlatFLRWMixin
__all__ = ["w0waCDM", "Flatw0waCDM"]
__doctest_requires__ = {'*': ['scipy']}
class w0waCDM(FLRW):
r"""FLRW cosmology with a CPL dark energy equation of state and curvature.
The equation for the dark energy equation of state uses the
CPL form as described in Chevallier & Polarski [1]_ and Linder [2]_:
:math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import w0waCDM
>>> cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
w0 = Parameter(doc="Dark energy equation of state at z=0.", fvalidate="float")
wa = Parameter(doc="Negative derivative of dark energy equation of state w.r.t. a.",
fvalidate="float")
def __init__(self, H0, Om0, Ode0, w0=-1.0, wa=0.0, Tcmb0=0.0*u.K, Neff=3.04,
m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=Ode0, Tcmb0=Tcmb0, Neff=Neff,
m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
self.w0 = w0
self.wa = wa
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._w0, self._wa)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0, self._wa)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0,
self._wa)
def w(self, z):
r"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state
Returns `float` if the input is scalar.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1. Here this is
:math:`w(z) = w_0 + w_a (1 - a) = w_0 + w_a \frac{z}{1+z}`.
"""
z = aszarr(z)
return self._w0 + self._wa * z / (z + 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and in this case is given by
.. math::
I = \left(1 + z\right)^{3 \left(1 + w_0 + w_a\right)}
\exp \left(-3 w_a \frac{z}{1+z}\right)
"""
z = aszarr(z)
zp1 = z + 1.0 # (converts z [unit] -> z [dimensionless])
return zp1 ** (3 * (1 + self._w0 + self._wa)) * exp(-3 * self._wa * z / zp1)
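# Illustrative sketch (hypothetical helper, not part of astropy's API): the CPL
# closed form used by ``w0waCDM.de_density_scale`` above, written as a
# standalone function so the dependence on w0 and wa is explicit; note that
# (zp1 - 1) / zp1 is just z / (1 + z).
def _sketch_cpl_de_density_scale(z, w0, wa):
    zp1 = aszarr(z) + 1.0
    return zp1 ** (3 * (1 + w0 + wa)) * exp(-3 * wa * (zp1 - 1.0) / zp1)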
class Flatw0waCDM(FlatFLRWMixin, w0waCDM):
"""FLRW cosmology with a CPL dark energy equation of state and no
curvature.
The equation for the dark energy equation of state uses the CPL form as
described in Chevallier & Polarski [1]_ and Linder [2]_:
:math:`w(z) = w_0 + w_a (1-a) = w_0 + w_a z / (1+z)`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
w0 : float, optional
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float, optional
Negative derivative of the dark energy equation of state with respect
to the scale factor. A cosmological constant has w0=-1.0 and wa=0.0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
        Temperature of the CMB at z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Examples
--------
>>> from astropy.cosmology import Flatw0waCDM
>>> cosmo = Flatw0waCDM(H0=70, Om0=0.3, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
To get an equivalent cosmology, but of type `astropy.cosmology.w0waCDM`,
use :attr:`astropy.cosmology.FlatFLRWMixin.nonflat`.
>>> cosmo.nonflat
w0waCDM(H0=70.0 km / (Mpc s), Om0=0.3, ...
References
----------
.. [1] Chevallier, M., & Polarski, D. (2001). Accelerating Universes with
Scaling Dark Matter. International Journal of Modern Physics D,
10(2), 213-223.
.. [2] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
def __init__(self, H0, Om0, w0=-1.0, wa=0.0, Tcmb0=0.0*u.K, Neff=3.04,
m_nu=0.0*u.eV, Ob0=None, *, name=None, meta=None):
super().__init__(H0=H0, Om0=Om0, Ode0=0.0, w0=w0, wa=wa, Tcmb0=Tcmb0,
Neff=Neff, m_nu=m_nu, Ob0=Ob0, name=name, meta=meta)
# Please see :ref:`astropy-cosmology-fast-integrals` for discussion
# about what is being done here.
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._w0, self._wa)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0 + self._Onu0,
self._w0, self._wa)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.fw0wacdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0,
self._wa)
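# Minimal self-check sketch (illustrative; not part of the module's API or
# tests): with w0 = -1 and wa = 0 the CPL parametrization reduces to a
# cosmological constant, so the dark energy density should not evolve.
def _sketch_check_cpl_reduces_to_lambda(z=2.0):
    cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-1.0, wa=0.0)
    return abs(cosmo.de_density_scale(z) - 1.0) < 1e-12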
|
729cfd9b0f559170349750da34a0dd17e3b567fdafa8e0d5f1a24cc658b70c8a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# THIRD PARTY
import pytest
# LOCAL
from astropy.cosmology import Cosmology
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.io.table import from_table, to_table
from astropy.table import QTable, Table, vstack
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromTableTestMixin(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="astropy.table"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_table_bad_index(self, from_format, to_format):
"""Test if argument ``index`` is incorrect"""
tbl = to_format("astropy.table")
# single-row table and has a non-0/None index
with pytest.raises(IndexError, match="index 2 out of range"):
from_format(tbl, index=2, format="astropy.table")
# string index where doesn't match
with pytest.raises(KeyError, match="No matches found for key"):
from_format(tbl, index="row 0", format="astropy.table")
# -----------------------
def test_to_table_failed_cls(self, to_format):
"""Test failed table type."""
with pytest.raises(TypeError, match="'cls' must be"):
to_format('astropy.table', cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
def test_to_table_cls(self, to_format, tbl_cls):
tbl = to_format('astropy.table', cls=tbl_cls)
assert isinstance(tbl, tbl_cls) # test type
# -----------------------
@pytest.mark.parametrize("in_meta", [True, False])
def test_to_table_in_meta(self, cosmo_cls, to_format, in_meta):
"""Test where the cosmology class is placed."""
tbl = to_format('astropy.table', cosmology_in_meta=in_meta)
# if it's in metadata, it's not a column. And vice versa.
if in_meta:
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.colnames # not also a column
else:
assert tbl["cosmology"][0] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.meta
# -----------------------
def test_to_table(self, cosmo_cls, cosmo, to_format):
"""Test cosmology -> astropy.table."""
tbl = to_format("astropy.table")
# Test properties of Table.
assert isinstance(tbl, QTable)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl["name"] == cosmo.name
assert tbl.indices # indexed
# Test each Parameter column has expected information.
for n in cosmo.__parameters__:
P = getattr(cosmo_cls, n) # Parameter
col = tbl[n] # Column
# Compare the two
assert col.info.name == P.name
assert col.info.description == P.__doc__
assert col.info.meta == (cosmo.meta.get(n) or {})
# -----------------------
def test_from_not_table(self, cosmo, from_format):
"""Test not passing a Table to the Table parser."""
with pytest.raises((TypeError, ValueError)):
from_format("NOT A TABLE", format="astropy.table")
def test_tofrom_table_instance(self, cosmo_cls, cosmo, from_format, to_format):
"""Test cosmology -> astropy.table -> cosmology."""
tbl = to_format("astropy.table")
# add information
tbl["mismatching"] = "will error"
# tests are different if the last argument is a **kwarg
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = from_format(tbl, format="astropy.table")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(tbl, format="astropy.table")
# unless mismatched are moved to meta
got = from_format(tbl, format="astropy.table", move_to_meta=True)
assert got == cosmo
assert got.meta["mismatching"] == "will error"
# it won't error if everything matches up
tbl.remove_column("mismatching")
got = from_format(tbl, format="astropy.table")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``to_format``.
tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
got = from_format(tbl, format="astropy.table")
assert got == cosmo
# also it auto-identifies 'format'
got = from_format(tbl)
assert got == cosmo
def test_fromformat_table_subclass_partial_info(self, cosmo_cls, cosmo,
from_format, to_format):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
# test to_format
tbl = to_format("astropy.table")
assert isinstance(tbl, QTable)
# partial information
tbl.meta.pop("cosmology", None)
del tbl["Tcmb0"]
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.from_format(tbl, format="astropy.table")
got2 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls)
got3 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
@pytest.mark.parametrize("add_index", [True, False])
def test_tofrom_table_mutlirow(self, cosmo_cls, cosmo, from_format, add_index):
"""Test if table has multiple rows."""
# ------------
# To Table
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
tbl = vstack([c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts='silent')
assert isinstance(tbl, QTable)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl[1]["name"] == cosmo.name
# whether to add an index. `from_format` can work with or without.
if add_index:
tbl.add_index("name", unique=True)
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
from_format(tbl, format="astropy.table")
# unless the index argument is provided
got = from_format(tbl, index=1, format="astropy.table")
assert got == cosmo
# the index can be a string
got = from_format(tbl, index=cosmo.name, format="astropy.table")
assert got == cosmo
# when there's more than one cosmology found
tbls = vstack([tbl, tbl], metadata_conflicts="silent")
with pytest.raises(ValueError, match="more than one"):
from_format(tbls, index=cosmo.name, format="astropy.table")
@pytest.mark.parametrize("format", [True, False, None, "astropy.table"])
def test_is_equivalent_to_table(self, cosmo, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a |Table|.
"""
obj = to_format("astropy.table")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (True if format is not False else False)
class TestToFromTable(ToFromDirectTestBase, ToFromTableTestMixin):
"""Directly test ``to/from_table``."""
def setup_class(self):
self.functions = {"to": to_table, "from": from_table}
|
1b3e7496abd3c968b331d63307ca25756d9b64eb053a87826982ea38df48047b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology, FlatLambdaCDM, Planck18
from astropy.cosmology import units as cu
from astropy.cosmology.io.yaml import from_yaml, to_yaml, yaml_constructor, yaml_representer
from astropy.io.misc.yaml import AstropyDumper, dump, load
from .base import ToFromDirectTestBase, ToFromTestMixinBase
##############################################################################
# Test Serializer
def test_yaml_representer():
"""Test :func:`~astropy.cosmology.io.yaml.yaml_representer`."""
# test function `representer`
representer = yaml_representer("!astropy.cosmology.flrw.LambdaCDM")
assert callable(representer)
# test the normal method of dumping to YAML
yml = dump(Planck18)
assert isinstance(yml, str)
assert yml.startswith("!astropy.cosmology.flrw.FlatLambdaCDM")
def test_yaml_constructor():
"""Test :func:`~astropy.cosmology.io.yaml.yaml_constructor`."""
# test function `constructor`
constructor = yaml_constructor(FlatLambdaCDM)
assert callable(constructor)
# it's too hard to manually construct a node, so we only test dump/load
# this is also a good round-trip test
yml = dump(Planck18)
with u.add_enabled_units(cu): # needed for redshift units
cosmo = load(yml)
assert isinstance(cosmo, FlatLambdaCDM)
assert cosmo == Planck18
assert cosmo.meta == Planck18.meta
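# Round-trip usage sketch of the dump/load path exercised above (illustrative
# only; not named ``test_*`` so pytest does not collect it):
def _sketch_yaml_roundtrip():
    yml = dump(Planck18)
    with u.add_enabled_units(cu):  # redshift units are needed on reconstruction
        return load(yml) == Planck18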
##############################################################################
# Test Unified I/O
class ToFromYAMLTestMixin(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="yaml"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
"""
@pytest.fixture
def xfail_if_not_registered_with_yaml(self, cosmo_cls):
"""
YAML I/O only works on registered classes. So the thing to check is
if this class is registered. If not, :func:`pytest.xfail` this test.
Some of the tests define custom cosmologies. They are not registered.
"""
if cosmo_cls not in AstropyDumper.yaml_representers:
pytest.xfail(f"Cosmologies of type {cosmo_cls} are not registered with YAML.")
# ===============================================================
def test_to_yaml(self, cosmo, to_format, xfail_if_not_registered_with_yaml):
"""Test cosmology -> YAML."""
yml = to_format('yaml')
assert isinstance(yml, str) # test type
assert yml.startswith("!astropy.cosmology.")
def test_from_yaml_default(self, cosmo, to_format, from_format,
xfail_if_not_registered_with_yaml):
"""Test cosmology -> YAML -> cosmology."""
yml = to_format('yaml')
got = from_format(yml, format="yaml") # (cannot autoidentify)
assert got.name == cosmo.name
assert got.meta == cosmo.meta
# it won't error if everything matches up
got = from_format(yml, format="yaml")
assert got == cosmo
assert got.meta == cosmo.meta
# auto-identify test moved because it doesn't work.
# see test_from_yaml_autoidentify
def test_from_yaml_autoidentify(self, cosmo, to_format, from_format,
xfail_if_not_registered_with_yaml):
"""As a non-path string, it does NOT auto-identifies 'format'.
TODO! this says there should be different types of I/O registries.
not just hacking object conversion on top of file I/O.
"""
assert self.can_autodentify("yaml") is False
# Showing the specific error. The str is interpreted as a file location
# but is too long a file name.
yml = to_format('yaml')
with pytest.raises((FileNotFoundError, OSError)): # OSError in Windows
from_format(yml)
# # TODO! this is a challenging test to write. It's also unlikely to happen.
# def test_fromformat_subclass_partial_info_yaml(self, cosmo):
# """
# Test writing from an instance and reading from that class.
# This works with missing information.
# """
# -----------------------------------------------------
@pytest.mark.parametrize("format", [True, False, None])
def test_is_equivalent_to_yaml(self, cosmo, to_format, format,
xfail_if_not_registered_with_yaml):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a YAML string. YAML can't be identified without "format" specified.
"""
obj = to_format("yaml")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is False
def test_is_equivalent_to_yaml_specify_format(self, cosmo, to_format,
xfail_if_not_registered_with_yaml):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
Same as ``test_is_equivalent_to_yaml`` but with ``format="yaml"``.
"""
assert cosmo.is_equivalent(to_format("yaml"), format="yaml") is True
class TestToFromYAML(ToFromDirectTestBase, ToFromYAMLTestMixin):
"""
Directly test ``to/from_yaml``.
These are not public API and are discouraged from use, in favor of
``Cosmology.to/from_format(..., format="yaml")``, but should be tested
regardless b/c 3rd party packages might use these in their Cosmology I/O.
Also, it's cheap to test.
"""
def setup_class(self):
"""Set up fixtures to use ``to/from_yaml``, not the I/O abstractions."""
self.functions = {"to": to_yaml, "from": from_yaml}
@pytest.fixture(scope="class", autouse=True)
def setup(self):
"""
Setup and teardown for tests.
        This overrides the superclass fixture because `ToFromDirectTestBase` adds a custom
Cosmology ``CosmologyWithKwargs`` that is not registered with YAML.
"""
yield # run tests
def test_from_yaml_autoidentify(self, cosmo, to_format, from_format):
"""
        When the function is called directly there is no auto-identification,
        so this overrides the test from `ToFromYAMLTestMixin`.
"""
|
8ad2c754b71f7e11646b650d90abd43ed0f5a8c06a6510e8264b393ad04c4e1e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test that all expected methods are present, before I/O tests import.
This file is weirdly named so that it's the first test of I/O.
"""
from astropy.cosmology.connect import convert_registry, readwrite_registry
def test_expected_readwrite_io():
"""Test that ONLY the expected I/O is registered."""
got = {k for k, _ in readwrite_registry._readers.keys()}
expected = {"ascii.ecsv"}
assert got == expected
def test_expected_convert_io():
"""Test that ONLY the expected I/O is registered."""
got = {k for k, _ in convert_registry._readers.keys()}
expected = {"astropy.cosmology", "mapping", "astropy.model", "astropy.row",
"astropy.table", "yaml"}
assert got == expected
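# Introspection sketch (illustrative; not named ``test_*`` so pytest does not
# collect it): the registered format names can be listed directly from the
# registries checked above.
def _sketch_registered_convert_formats():
    return sorted({fmt for fmt, _ in convert_registry._readers.keys()})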
|
af84892ba7d15306ba37229c3b368067eba5293a65a6b0bcd1b6a84a1fa6271e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import json
import os
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import units as cu
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
def read_json(filename, **kwargs):
"""Read JSON.
Parameters
----------
filename : str
**kwargs
Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format`
Returns
-------
`~astropy.cosmology.Cosmology` instance
"""
# read
if isinstance(filename, (str, bytes, os.PathLike)):
with open(filename, "r") as file:
data = file.read()
else: # file-like : this also handles errors in dumping
data = filename.read()
mapping = json.loads(data) # parse json mappable to dict
# deserialize Quantity
with u.add_enabled_units(cu.redshift):
for k, v in mapping.items():
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping[k] = u.Quantity(v["value"], v["unit"])
for k, v in mapping.get("meta", {}).items(): # also the metadata
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping["meta"][k] = u.Quantity(v["value"], v["unit"])
return Cosmology.from_format(mapping, format="mapping", **kwargs)
def write_json(cosmology, file, *, overwrite=False):
"""Write Cosmology to JSON.
Parameters
----------
cosmology : `astropy.cosmology.Cosmology` subclass instance
file : path-like or file-like
overwrite : bool (optional, keyword-only)
"""
data = cosmology.to_format("mapping") # start by turning into dict
data["cosmology"] = data["cosmology"].__qualname__
# serialize Quantity
for k, v in data.items():
if isinstance(v, u.Quantity):
data[k] = {"value": v.value.tolist(), "unit": str(v.unit)}
for k, v in data.get("meta", {}).items(): # also serialize the metadata
if isinstance(v, u.Quantity):
data["meta"][k] = {"value": v.value.tolist(), "unit": str(v.unit)}
# check that file exists and whether to overwrite.
if os.path.exists(file) and not overwrite:
raise IOError(f"{file} exists. Set 'overwrite' to write over.")
with open(file, "w") as write_file:
json.dump(data, write_file)
def json_identify(origin, filepath, fileobj, *args, **kwargs):
return filepath is not None and filepath.endswith(".json")
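# Round-trip sketch for the three helpers above (illustrative only; the
# ``tmp_path`` argument is an assumption, e.g. a ``pathlib.Path`` to a writable
# directory, and this is not collected as a test):
def _sketch_json_roundtrip(tmp_path):
    from astropy.cosmology import Planck18
    fp = tmp_path / "planck18.json"
    write_json(Planck18, fp)
    return read_json(fp) == Planck18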
###############################################################################
class ReadWriteJSONTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="json"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.fixture(scope="class", autouse=True)
def register_and_unregister_json(self):
"""Setup & teardown for JSON read/write tests."""
# Register
readwrite_registry.register_reader("json", Cosmology, read_json, force=True)
readwrite_registry.register_writer("json", Cosmology, write_json, force=True)
readwrite_registry.register_identifier("json", Cosmology, json_identify, force=True)
yield # Run all tests in class
# Unregister
readwrite_registry.unregister_reader("json", Cosmology)
readwrite_registry.unregister_writer("json", Cosmology)
readwrite_registry.unregister_identifier("json", Cosmology)
# ========================================================================
def test_readwrite_json_subclass_partial_info(self, cosmo_cls, cosmo, read,
write, tmp_path, add_cu):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_readwrite_json_subclass_partial_info.json"
# test write
cosmo.write(fp, format="json")
# partial information
with open(fp, "r") as file:
L = file.readlines()[0]
L = L[: L.index('"cosmology":')] + L[L.index(", ") + 2 :] # remove cosmology # noqa: #203
i = L.index('"Tcmb0":') # delete Tcmb0
        L = L[:i] + L[L.index(", ", L.index(", ", i) + 1) + 2 :]  # second occurrence  # noqa: #203
tempfname = tmp_path / f"{cosmo.name}_temp.json"
with open(tempfname, "w") as file:
file.writelines([L])
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(tempfname, format="json")
got2 = read(tempfname, format="json", cosmology=cosmo_cls)
got3 = read(tempfname, format="json", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
class TestReadWriteJSON(ReadWriteDirectTestBase, ReadWriteJSONTestMixin):
"""
Directly test ``read/write_json``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="json")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_json, "write": write_json}
|
bc3e4a725e541a562a62c76f0d39488b09145a86317d2b699e087cf390ecbc5a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.lambdacdm`."""
##############################################################################
# IMPORTS
# STDLIB
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import FlatLambdaCDM, LambdaCDM
from astropy.cosmology.flrw.lambdacdm import ellipkinc, hyp2f1
from astropy.cosmology.tests.helper import get_redshift_methods
from astropy.cosmology.tests.test_core import invalid_zs, valid_zs
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .test_base import FlatFLRWMixinTest, FLRWSubclassTest
##############################################################################
# TESTS
##############################################################################
@pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed")
def test_optional_deps_functions():
"""Test stand-in functions when optional dependencies not installed."""
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"):
ellipkinc()
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"):
hyp2f1()
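# Companion sketch (illustrative; not collected as a test): with scipy
# installed the same names resolve to the real ``scipy.special`` functions.
def _sketch_real_scipy_functions():
    if not HAS_SCIPY:  # mirror the guard used by the test above
        return None
    return ellipkinc(0.5, 0.9), hyp2f1(1.0, 1.0, 2.0, 0.5)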
##############################################################################
class TestLambdaCDM(FLRWSubclassTest):
"""Test :class:`astropy.cosmology.LambdaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = LambdaCDM
# ===============================================================
# Method & Attribute Tests
_FLRW_redshift_methods = (
get_redshift_methods(LambdaCDM, include_private=True, include_z2=False)
- {"_dS_age"})
# `_dS_age` is removed because it doesn't strictly rely on the value of `z`,
# so any input that doesn't trip up ``np.shape`` is "valid"
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize('method', _FLRW_redshift_methods)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
@pytest.mark.parametrize("z", valid_zs)
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.LambdaCDM.w`."""
super().test_w(cosmo, z)
w = cosmo.w(z)
assert u.allclose(w, -1.0)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = ("LambdaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s), Om0=0.27,"
" Ode0=0.73, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV,"
" Ob0=0.03)")
assert repr(cosmo) == expected
# -----------------------------------------------------------------------------
class TestFlatLambdaCDM(FlatFLRWMixinTest, TestLambdaCDM):
"""Test :class:`astropy.cosmology.FlatLambdaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = FlatLambdaCDM
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize('method', TestLambdaCDM._FLRW_redshift_methods - {"Otot"})
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
# ===============================================================
# Method & Attribute Tests
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
super().test_repr(cosmo_cls, cosmo)
expected = ("FlatLambdaCDM(name=\"ABCMeta\", H0=70.0 km / (Mpc s),"
" Om0=0.27, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV,"
" Ob0=0.03)")
assert repr(cosmo) == expected
|
e53bccca2454bfeb4019ae4f3a86c23410a7960f346231cf3aadf49da59c8627 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.__init__.py`."""
##############################################################################
# IMPORTS
import pytest
from astropy.utils import resolve_name
from astropy.utils.exceptions import AstropyDeprecationWarning
##############################################################################
# TESTS
##############################################################################
@pytest.mark.parametrize(
"attr",
[
"H0units_to_invs",
"a_B_c2",
"critdens_const",
"kB_evK",
"quad",
"radian_in_arcmin",
"radian_in_arcsec",
"sec_to_Gyr",
"ellipkinc",
"hyp2f1",
],
)
def test_deprecated_private_variables(attr):
"""Test deprecation warnings are raised for private variables."""
with pytest.warns(AstropyDeprecationWarning):
resolve_name("astropy", "cosmology", "flrw", attr)
def test_getattr_error_attr_not_found():
"""Test getattr raises error for DNE."""
with pytest.raises(ImportError):
from astropy.cosmology.flrw import this_is_not_a_variable # noqa: F401
|
582b1529816c54f0c5117c49c7aaf49afb27019de6ff013ead7a8533502f6e25 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.base`."""
##############################################################################
# IMPORTS
# STDLIB
import abc
import copy
# THIRD PARTY
import numpy as np
import pytest
import astropy.constants as const
# LOCAL
import astropy.units as u
from astropy.cosmology import FLRW, FlatLambdaCDM, LambdaCDM, Planck18
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.flrw.base import _a_B_c2, _critdens_const, _H0units_to_invs, quad
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.tests.helper import get_redshift_methods
from astropy.cosmology.tests.test_core import CosmologySubclassTest as CosmologyTest
from astropy.cosmology.tests.test_core import (
FlatCosmologyMixinTest, ParameterTestMixin, invalid_zs, valid_zs)
from astropy.utils.compat.optional_deps import HAS_SCIPY
##############################################################################
# SETUP / TEARDOWN
class SubFLRW(FLRW):
def w(self, z):
return super().w(z)
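# Quick sanity sketch (illustrative; not collected as a test): ``FLRW`` itself
# is abstract because ``w`` is an abstractmethod, so only the trivial subclass
# above can be instantiated.
def _sketch_subflrw_is_concrete():
    with pytest.raises(TypeError):
        FLRW(70, 0.27, 0.73)  # abstract class, cannot be instantiated
    return SubFLRW(70, 0.27, 0.73)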
##############################################################################
# TESTS
##############################################################################
@pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed")
def test_optional_deps_functions():
"""Test stand-in functions when optional dependencies not installed."""
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.integrate'"):
quad()
##############################################################################
class ParameterH0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` H0 on a Cosmology.
H0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_H0(self, cosmo_cls, cosmo):
"""Test Parameter ``H0``."""
unit = u.Unit("km/(s Mpc)")
# on the class
assert isinstance(cosmo_cls.H0, Parameter)
assert "Hubble constant" in cosmo_cls.H0.__doc__
assert cosmo_cls.H0.unit == unit
# validation
assert cosmo_cls.H0.validate(cosmo, 1) == 1 * unit
assert cosmo_cls.H0.validate(cosmo, 10 * unit) == 10 * unit
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls.H0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.H0 is cosmo._H0
assert cosmo.H0 == self._cls_args["H0"]
assert isinstance(cosmo.H0, u.Quantity) and cosmo.H0.unit == unit
def test_init_H0(self, cosmo_cls, ba):
"""Test initialization for values of ``H0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0 == ba.arguments["H0"]
# also without units
ba.arguments["H0"] = ba.arguments["H0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0.value == ba.arguments["H0"]
# fails for non-scalar
ba.arguments["H0"] = u.Quantity([70, 100], u.km / u.s / u.Mpc)
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterOm0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Om0 on a Cosmology.
Om0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Om0(self, cosmo_cls, cosmo):
"""Test Parameter ``Om0``."""
# on the class
assert isinstance(cosmo_cls.Om0, Parameter)
assert "Omega matter" in cosmo_cls.Om0.__doc__
# validation
assert cosmo_cls.Om0.validate(cosmo, 1) == 1
assert cosmo_cls.Om0.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Om0 cannot be negative"):
cosmo_cls.Om0.validate(cosmo, -1)
# on the instance
assert cosmo.Om0 is cosmo._Om0
assert cosmo.Om0 == self._cls_args["Om0"]
assert isinstance(cosmo.Om0, float)
def test_init_Om0(self, cosmo_cls, ba):
"""Test initialization for values of ``Om0``."""
# test that it works with units
ba.arguments["Om0"] = ba.arguments["Om0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Om0 == ba.arguments["Om0"]
# also without units
ba.arguments["Om0"] = ba.arguments["Om0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Om0 == ba.arguments["Om0"]
# fails for negative numbers
ba.arguments["Om0"] = -0.27
with pytest.raises(ValueError, match="Om0 cannot be negative."):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterOde0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a Cosmology.
Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls):
"""Test Parameter ``Ode0`` on the class."""
assert isinstance(cosmo_cls.Ode0, Parameter)
assert "Omega dark energy" in cosmo_cls.Ode0.__doc__
def test_Parameter_Ode0_validation(self, cosmo_cls, cosmo):
"""Test Parameter ``Ode0`` validation."""
assert cosmo_cls.Ode0.validate(cosmo, 1.1) == 1.1
assert cosmo_cls.Ode0.validate(cosmo, 10 * u.one) == 10.0
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls.Ode0.validate(cosmo, 10 * u.km)
def test_Ode0(self, cosmo):
"""Test Parameter ``Ode0`` validation."""
# if Ode0 is a parameter, test its value
assert cosmo.Ode0 is cosmo._Ode0
assert cosmo.Ode0 == self._cls_args["Ode0"]
assert isinstance(cosmo.Ode0, float)
def test_init_Ode0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ode0``."""
# test that it works with units
ba.arguments["Ode0"] = ba.arguments["Ode0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# also without units
ba.arguments["Ode0"] = ba.arguments["Ode0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# Setting param to 0 respects that. Note this test uses ``Ode()``.
ba.arguments["Ode0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert u.allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Ode(1), 0)
# Must be dimensionless or have no units. Errors otherwise.
ba.arguments["Ode0"] = 10 * u.km
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterTcmb0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Tcmb0 on a Cosmology.
Tcmb0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Tcmb0(self, cosmo_cls, cosmo):
"""Test Parameter ``Tcmb0``."""
# on the class
assert isinstance(cosmo_cls.Tcmb0, Parameter)
assert "Temperature of the CMB" in cosmo_cls.Tcmb0.__doc__
assert cosmo_cls.Tcmb0.unit == u.K
# validation
assert cosmo_cls.Tcmb0.validate(cosmo, 1) == 1 * u.K
assert cosmo_cls.Tcmb0.validate(cosmo, 10 * u.K) == 10 * u.K
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls.Tcmb0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.Tcmb0 is cosmo._Tcmb0
assert cosmo.Tcmb0 == self.cls_kwargs["Tcmb0"]
assert isinstance(cosmo.Tcmb0, u.Quantity) and cosmo.Tcmb0.unit == u.K
def test_init_Tcmb0(self, cosmo_cls, ba):
"""Test initialization for values of ``Tcmb0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0 == ba.arguments["Tcmb0"]
# also without units
ba.arguments["Tcmb0"] = ba.arguments["Tcmb0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0.value == ba.arguments["Tcmb0"]
# must be a scalar
ba.arguments["Tcmb0"] = u.Quantity([0.0, 2], u.K)
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterNeffTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Neff on a Cosmology.
Neff is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Neff(self, cosmo_cls, cosmo):
"""Test Parameter ``Neff``."""
# on the class
assert isinstance(cosmo_cls.Neff, Parameter)
assert "Number of effective neutrino species" in cosmo_cls.Neff.__doc__
# validation
assert cosmo_cls.Neff.validate(cosmo, 1) == 1
assert cosmo_cls.Neff.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Neff cannot be negative"):
cosmo_cls.Neff.validate(cosmo, -1)
# on the instance
assert cosmo.Neff is cosmo._Neff
assert cosmo.Neff == self.cls_kwargs.get("Neff", 3.04)
assert isinstance(cosmo.Neff, float)
def test_init_Neff(self, cosmo_cls, ba):
"""Test initialization for values of ``Neff``."""
# test that it works with units
ba.arguments["Neff"] = ba.arguments["Neff"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
# also without units
ba.arguments["Neff"] = ba.arguments["Neff"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
ba.arguments["Neff"] = -1
with pytest.raises(ValueError):
cosmo_cls(*ba.args, **ba.kwargs)
class Parameterm_nuTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` m_nu on a Cosmology.
m_nu is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_m_nu(self, cosmo_cls, cosmo):
"""Test Parameter ``m_nu``."""
# on the class
assert isinstance(cosmo_cls.m_nu, Parameter)
assert "Mass of neutrino species" in cosmo_cls.m_nu.__doc__
assert cosmo_cls.m_nu.unit == u.eV
assert cosmo_cls.m_nu.equivalencies == u.mass_energy()
# on the instance
# assert cosmo.m_nu is cosmo._m_nu
assert u.allclose(cosmo.m_nu, [0.0, 0.0, 0.0] * u.eV)
# set differently depending on the other inputs
if cosmo.Tnu0.value == 0:
assert cosmo.m_nu is None
elif not cosmo._massivenu: # only massless
assert u.allclose(cosmo.m_nu, 0 * u.eV)
elif self._nmasslessnu == 0: # only massive
assert cosmo.m_nu == cosmo._massivenu_mass
else: # a mix -- the most complicated case
assert u.allclose(cosmo.m_nu[:self._nmasslessnu], 0 * u.eV)
assert u.allclose(cosmo.m_nu[self._nmasslessnu], cosmo._massivenu_mass)
def test_init_m_nu(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu``.
Note this requires the class to have a property ``has_massive_nu``.
"""
# Test that it works when m_nu has units.
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert np.all(cosmo.m_nu == ba.arguments["m_nu"]) # (& checks len, unit)
assert not cosmo.has_massive_nu
assert cosmo.m_nu.unit == u.eV # explicitly check unit once.
# And it works when m_nu doesn't have units.
ba.arguments["m_nu"] = ba.arguments["m_nu"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert np.all(cosmo.m_nu.value == ba.arguments["m_nu"])
assert not cosmo.has_massive_nu
# A negative m_nu raises an exception.
tba = copy.copy(ba)
tba.arguments["m_nu"] = u.Quantity([-0.3, 0.2, 0.1], u.eV)
with pytest.raises(ValueError, match="invalid"):
cosmo_cls(*tba.args, **tba.kwargs)
def test_init_m_nu_and_Neff(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu`` and ``Neff``.
Note this test requires ``Neff`` as constructor input, and a property
``has_massive_nu``.
"""
# Mismatch with Neff = wrong number of neutrinos
tba = copy.copy(ba)
tba.arguments["Neff"] = 4.05
tba.arguments["m_nu"] = u.Quantity([0.15, 0.2, 0.1], u.eV)
with pytest.raises(ValueError, match="unexpected number of neutrino"):
cosmo_cls(*tba.args, **tba.kwargs)
# No neutrinos, but Neff
tba.arguments["m_nu"] = 0
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert not cosmo.has_massive_nu
assert len(cosmo.m_nu) == 4
assert cosmo.m_nu.unit == u.eV
assert u.allclose(cosmo.m_nu, 0 * u.eV)
# TODO! move this test when create ``test_nu_relative_density``
assert u.allclose(cosmo.nu_relative_density(1.0), 0.22710731766 * 4.05, rtol=1e-6)
# All massive neutrinos case, len from Neff
tba.arguments["m_nu"] = 0.1 * u.eV
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.has_massive_nu
assert len(cosmo.m_nu) == 4
assert cosmo.m_nu.unit == u.eV
assert u.allclose(cosmo.m_nu, [0.1, 0.1, 0.1, 0.1] * u.eV)
def test_init_m_nu_override_by_Tcmb0(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu``.
Note this test requires ``Tcmb0`` as constructor input, and a property
``has_massive_nu``.
"""
# If Neff = 0, m_nu is None.
tba = copy.copy(ba)
tba.arguments["Neff"] = 0
        cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.m_nu is None
assert not cosmo.has_massive_nu
# If Tcmb0 = 0, m_nu is None
tba = copy.copy(ba)
tba.arguments["Tcmb0"] = 0
        cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.m_nu is None
assert not cosmo.has_massive_nu
class ParameterOb0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ob0 on a Cosmology.
Ob0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Ob0(self, cosmo_cls, cosmo):
"""Test Parameter ``Ob0``."""
# on the class
assert isinstance(cosmo_cls.Ob0, Parameter)
assert "Omega baryon;" in cosmo_cls.Ob0.__doc__
# validation
assert cosmo_cls.Ob0.validate(cosmo, None) is None
assert cosmo_cls.Ob0.validate(cosmo, 0.1) == 0.1
assert cosmo_cls.Ob0.validate(cosmo, 0.1 * u.one) == 0.1
with pytest.raises(ValueError, match="Ob0 cannot be negative"):
cosmo_cls.Ob0.validate(cosmo, -1)
with pytest.raises(ValueError, match="baryonic density can not be larger"):
cosmo_cls.Ob0.validate(cosmo, cosmo.Om0 + 1)
# on the instance
assert cosmo.Ob0 is cosmo._Ob0
assert cosmo.Ob0 == 0.03
def test_init_Ob0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ob0``."""
# test that it works with units
assert isinstance(ba.arguments["Ob0"], u.Quantity)
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == ba.arguments["Ob0"]
# also without units
ba.arguments["Ob0"] = ba.arguments["Ob0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == ba.arguments["Ob0"]
# Setting param to 0 respects that. Note this test uses ``Ob()``.
ba.arguments["Ob0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ob(1), 0)
assert u.allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# Negative Ob0 errors
tba = copy.copy(ba)
tba.arguments["Ob0"] = -0.04
with pytest.raises(ValueError, match="Ob0 cannot be negative"):
cosmo_cls(*tba.args, **tba.kwargs)
# Ob0 > Om0 errors
tba.arguments["Ob0"] = tba.arguments["Om0"] + 0.1
with pytest.raises(ValueError, match="baryonic density can not be larger"):
cosmo_cls(*tba.args, **tba.kwargs)
# No baryons specified means baryon-specific methods fail.
tba = copy.copy(ba)
tba.arguments.pop("Ob0", None)
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
with pytest.raises(ValueError):
cosmo.Ob(1)
# also means DM fraction is undefined
with pytest.raises(ValueError):
cosmo.Odm(1)
# The default value is None
assert cosmo_cls._init_signature.parameters["Ob0"].default is None
class TestFLRW(CosmologyTest,
ParameterH0TestMixin, ParameterOm0TestMixin, ParameterOde0TestMixin,
ParameterTcmb0TestMixin, ParameterNeffTestMixin, Parameterm_nuTestMixin,
ParameterOb0TestMixin):
"""Test :class:`astropy.cosmology.FLRW`."""
abstract_w = True
def setup_class(self):
"""
Setup for testing.
FLRW is abstract, so tests are done on a subclass.
"""
# make sure SubCosmology is known
_COSMOLOGY_CLASSES["SubFLRW"] = SubFLRW
self.cls = SubFLRW
self._cls_args = dict(H0=70 * u.km / u.s / u.Mpc, Om0=0.27 * u.one, Ode0=0.73 * u.one)
self.cls_kwargs = dict(Tcmb0=3.0 * u.K, Ob0=0.03 * u.one,
name=self.__class__.__name__, meta={"a": "b"})
def teardown_class(self):
super().teardown_class(self)
_COSMOLOGY_CLASSES.pop("SubFLRW", None)
@pytest.fixture(scope="class")
def nonflatcosmo(self):
"""A non-flat cosmology used in equivalence tests."""
return LambdaCDM(70, 0.4, 0.8)
# ===============================================================
# Method & Attribute Tests
def test_init(self, cosmo_cls):
"""Test initialization."""
super().test_init(cosmo_cls)
# TODO! tests for initializing calculated values, e.g. `h`
# TODO! transfer tests for initializing neutrinos
def test_init_Tcmb0_zeroing(self, cosmo_cls, ba):
"""Test if setting Tcmb0 parameter to 0 influences other parameters.
TODO: consider moving this test to ``FLRWSubclassTest``
"""
ba.arguments["Tcmb0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ogamma0 == 0.0
assert cosmo.Onu0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
# ---------------------------------------------------------------
# Properties
def test_Odm0(self, cosmo_cls, cosmo):
"""Test property ``Odm0``."""
# on the class
assert isinstance(cosmo_cls.Odm0, property)
assert cosmo_cls.Odm0.fset is None # immutable
# on the instance
assert cosmo.Odm0 is cosmo._Odm0
# Odm0 can be None, if Ob0 is None. Otherwise DM = matter - baryons.
if cosmo.Ob0 is None:
assert cosmo.Odm0 is None
else:
assert np.allclose(cosmo.Odm0, cosmo.Om0 - cosmo.Ob0)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
# on the class
assert isinstance(cosmo_cls.Ok0, property)
assert cosmo_cls.Ok0.fset is None # immutable
# on the instance
assert cosmo.Ok0 is cosmo._Ok0
assert np.allclose(cosmo.Ok0, 1.0 - (cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0))
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# on the class
assert isinstance(cosmo_cls.is_flat, property)
assert cosmo_cls.is_flat.fset is None # immutable
# on the instance
assert isinstance(cosmo.is_flat, bool)
assert cosmo.is_flat is bool((cosmo.Ok0 == 0.0) and (cosmo.Otot0 == 1.0))
def test_Tnu0(self, cosmo_cls, cosmo):
"""Test property ``Tnu0``."""
# on the class
assert isinstance(cosmo_cls.Tnu0, property)
assert cosmo_cls.Tnu0.fset is None # immutable
# on the instance
assert cosmo.Tnu0 is cosmo._Tnu0
assert cosmo.Tnu0.unit == u.K
assert u.allclose(cosmo.Tnu0, 0.7137658555036082 * cosmo.Tcmb0, rtol=1e-5)
def test_has_massive_nu(self, cosmo_cls, cosmo):
"""Test property ``has_massive_nu``."""
# on the class
assert isinstance(cosmo_cls.has_massive_nu, property)
assert cosmo_cls.has_massive_nu.fset is None # immutable
# on the instance
if cosmo.Tnu0 == 0:
assert cosmo.has_massive_nu is False
else:
assert cosmo.has_massive_nu is cosmo._massivenu
def test_h(self, cosmo_cls, cosmo):
"""Test property ``h``."""
# on the class
assert isinstance(cosmo_cls.h, property)
assert cosmo_cls.h.fset is None # immutable
# on the instance
assert cosmo.h is cosmo._h
assert np.allclose(cosmo.h, cosmo.H0.value / 100.0)
def test_hubble_time(self, cosmo_cls, cosmo):
"""Test property ``hubble_time``."""
# on the class
assert isinstance(cosmo_cls.hubble_time, property)
assert cosmo_cls.hubble_time.fset is None # immutable
# on the instance
assert cosmo.hubble_time is cosmo._hubble_time
assert u.allclose(cosmo.hubble_time, (1 / cosmo.H0) << u.Gyr)
def test_hubble_distance(self, cosmo_cls, cosmo):
"""Test property ``hubble_distance``."""
# on the class
assert isinstance(cosmo_cls.hubble_distance, property)
assert cosmo_cls.hubble_distance.fset is None # immutable
# on the instance
assert cosmo.hubble_distance is cosmo._hubble_distance
assert cosmo.hubble_distance == (const.c / cosmo._H0).to(u.Mpc)
def test_critical_density0(self, cosmo_cls, cosmo):
"""Test property ``critical_density0``."""
# on the class
assert isinstance(cosmo_cls.critical_density0, property)
assert cosmo_cls.critical_density0.fset is None # immutable
# on the instance
assert cosmo.critical_density0 is cosmo._critical_density0
assert cosmo.critical_density0.unit == u.g / u.cm ** 3
cd0value = _critdens_const * (cosmo.H0.value * _H0units_to_invs) ** 2
assert cosmo.critical_density0.value == cd0value
def test_Ogamma0(self, cosmo_cls, cosmo):
"""Test property ``Ogamma0``."""
# on the class
assert isinstance(cosmo_cls.Ogamma0, property)
assert cosmo_cls.Ogamma0.fset is None # immutable
# on the instance
assert cosmo.Ogamma0 is cosmo._Ogamma0
# Ogamma0 \propto T^4 / rho_crit
expect = _a_B_c2 * cosmo.Tcmb0.value ** 4 / cosmo.critical_density0.value
assert np.allclose(cosmo.Ogamma0, expect)
# check absolute equality to 0 if Tcmb0 is 0
if cosmo.Tcmb0 == 0:
assert cosmo.Ogamma0 == 0
def test_Onu0(self, cosmo_cls, cosmo):
"""Test property ``Onu0``."""
# on the class
assert isinstance(cosmo_cls.Onu0, property)
assert cosmo_cls.Onu0.fset is None # immutable
# on the instance
assert cosmo.Onu0 is cosmo._Onu0
# neutrino temperature <= photon temperature since the neutrinos
# decouple first.
if cosmo.has_massive_nu: # Tcmb0 > 0 & has massive
# check the expected formula
assert cosmo.Onu0 == cosmo.Ogamma0 * cosmo.nu_relative_density(0)
# a sanity check on the ratio of neutrinos to photons
# technically it could be 1, but not for any of the tested cases.
assert cosmo.nu_relative_density(0) <= 1
elif cosmo.Tcmb0 == 0:
assert cosmo.Onu0 == 0
else:
# check the expected formula
assert cosmo.Onu0 == 0.22710731766 * cosmo._Neff * cosmo.Ogamma0
# and check compatibility with nu_relative_density
assert np.allclose(cosmo.nu_relative_density(0), 0.22710731766 * cosmo._Neff)
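# (The factor 0.22710731766 above equals (7 / 8) * (4 / 11)**(4 / 3), the
# energy density of the relativistic neutrinos relative to the photons, per
# unit Neff.)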
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`."""
assert cosmo.Otot0 == cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ode0 + cosmo.Ok0
# ---------------------------------------------------------------
# Methods
def test_w(self, cosmo):
"""Test abstract :meth:`astropy.cosmology.FLRW.w`."""
with pytest.raises(NotImplementedError, match="not implemented"):
cosmo.w(1)
def test_Otot(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
with pytest.raises(exception):
assert cosmo.Otot(1)
def test_efunc_vs_invefunc(self, cosmo):
"""
Test that efunc and inv_efunc give inverse values.
Here they just fail b/c no ``w(z)`` or no scipy.
"""
exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
with pytest.raises(exception):
cosmo.efunc(0.5)
with pytest.raises(exception):
cosmo.inv_efunc(0.5)
# ---------------------------------------------------------------
# from Cosmology
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# don't change any values
kwargs = cosmo._init_arguments.copy()
kwargs.pop("name", None) # make sure not setting name
kwargs.pop("meta", None) # make sure not setting name
c = cosmo.clone(**kwargs)
assert c.__class__ == cosmo.__class__
assert c == cosmo
# change ``H0``
# Note that H0 affects Ode0 because it changes Ogamma0
c = cosmo.clone(H0=100)
assert c.__class__ == cosmo.__class__
assert c.name == cosmo.name + " (modified)"
assert c.H0.value == 100
for n in (set(cosmo.__parameters__) - {"H0"}):
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1))
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
# change multiple things
c = cosmo.clone(name="new name", H0=100, Tcmb0=2.8, meta=dict(zz="tops"))
assert c.__class__ == cosmo.__class__
assert c.name == "new name"
assert c.H0.value == 100
assert c.Tcmb0.value == 2.8
assert c.meta == {**cosmo.meta, **dict(zz="tops")}
for n in (set(cosmo.__parameters__) - {"H0", "Tcmb0"}):
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1))
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
assert not u.allclose(c.Tcmb0.value, cosmo.Tcmb0.value)
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to CosmologySubclassTest
# test against a FlatFLRWMixin
# case (3) in FLRW.is_equivalent
if isinstance(cosmo, FlatLambdaCDM):
assert cosmo.is_equivalent(Planck18)
assert Planck18.is_equivalent(cosmo)
else:
assert not cosmo.is_equivalent(Planck18)
assert not Planck18.is_equivalent(cosmo)
class FLRWSubclassTest(TestFLRW):
"""
Test subclasses of :class:`astropy.cosmology.FLRW`.
This is broken away from ``TestFLRW``, because ``FLRW`` is an ABC and
subclasses must override some methods.
"""
abstract_w = False
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
# ===============================================================
# Method & Attribute Tests
_FLRW_redshift_methods = get_redshift_methods(FLRW, include_private=True, include_z2=False)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize('method', _FLRW_redshift_methods)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
with pytest.raises(exc):
getattr(cosmo, method)(z)
@pytest.mark.parametrize("z", valid_zs)
@abc.abstractmethod
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.w`.
Since ``w`` is abstract, each test class needs to define further tests.
"""
# super().test_w(cosmo, z) # NOT b/c abstract `w(z)`
w = cosmo.w(z)
assert np.shape(w) == np.shape(z) # test same shape
assert u.Quantity(w).unit == u.one # test no units or dimensionless
# -------------------------------------------
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
# super().test_Otot(cosmo) # NOT b/c abstract `w(z)`
assert np.allclose(
cosmo.Otot(z),
cosmo.Om(z) + cosmo.Ogamma(z) + cosmo.Onu(z) + cosmo.Ode(z) + cosmo.Ok(z))
# ---------------------------------------------------------------
def test_efunc_vs_invefunc(self, cosmo):
"""Test that ``efunc`` and ``inv_efunc`` give inverse values.
Note that the test doesn't need scipy because it doesn't need to call
``de_density_scale``.
"""
# super().test_efunc_vs_invefunc(cosmo) # NOT b/c abstract `w(z)`
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# -----------------------------------------------------------------------------
class ParameterFlatOde0TestMixin(ParameterOde0TestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a flat Cosmology.
This will augment or override some tests in ``ParameterOde0TestMixin``.
Ode0 is a descriptor, and descriptors are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls):
"""Test Parameter ``Ode0`` on the class."""
super().test_Parameter_Ode0(cosmo_cls)
assert cosmo_cls.Ode0.derived in (True, np.True_)
def test_Ode0(self, cosmo):
"""Test no-longer-Parameter ``Ode0``."""
assert cosmo.Ode0 is cosmo._Ode0
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0)
def test_init_Ode0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ode0``."""
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ok0)
# Ode0 is not in the signature
with pytest.raises(TypeError, match="Ode0"):
cosmo_cls(*ba.args, **ba.kwargs, Ode0=1)
class FlatFLRWMixinTest(FlatCosmologyMixinTest, ParameterFlatOde0TestMixin):
"""Tests for :class:`astropy.cosmology.FlatFLRWMixin` subclasses.
E.g. to use this class::
class TestFlatSomeFLRW(FlatFLRWMixinTest, TestSomeFLRW):
...
"""
def setup_class(self):
"""Setup for testing.
Set up as for regular FLRW test class, but remove dark energy component
since flat cosmologies forbid passing Ode0 as an argument;
see ``test_init_subclass``.
"""
super().setup_class(self)
self._cls_args.pop("Ode0")
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test initializing subclass, mostly that can't have Ode0 in init."""
super().test_init_subclass(cosmo_cls)
with pytest.raises(TypeError, match="subclasses of"):
class HASOde0SubClass(cosmo_cls):
def __init__(self, Ode0):
pass
_COSMOLOGY_CLASSES.pop(HASOde0SubClass.__qualname__, None)
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
super().test_init(cosmo_cls)
cosmo = cosmo_cls(*self.cls_args, **self.cls_kwargs)
assert cosmo._Ok0 == 0.0
assert cosmo._Ode0 == 1.0 - (cosmo._Om0 + cosmo._Ogamma0 + cosmo._Onu0 + cosmo._Ok0)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
super().test_Ok0(cosmo_cls, cosmo)
# for flat cosmologies, Ok0 is not *close* to 0, it *is* 0
assert cosmo.Ok0 == 0.0
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`. Should always be 1."""
super().test_Otot0(cosmo)
# for flat cosmologies, Otot0 is not *close* to 1, it *is* 1
assert cosmo.Otot0 == 1.0
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`. Should always be 1."""
super().test_Otot(cosmo, z)
# for flat cosmologies, Otot is 1, within precision.
assert u.allclose(cosmo.Otot(z), 1.0)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize('method', FLRWSubclassTest._FLRW_redshift_methods - {"Otot"})
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
# ---------------------------------------------------------------
def test_clone_to_nonflat_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_to_nonflat_change_param(cosmo)
# change Ode0, without non-flat
with pytest.raises(TypeError):
cosmo.clone(Ode0=1)
# change to non-flat
nc = cosmo.clone(to_nonflat=True, Ode0=cosmo.Ode0)
assert isinstance(nc, cosmo._nonflat_cls_)
assert nc == cosmo.nonflat
nc = cosmo.clone(to_nonflat=True, Ode0=1)
assert nc.Ode0 == 1.0
assert nc.name == cosmo.name + " (modified)"
# ---------------------------------------------------------------
def test_is_equivalent(self, cosmo, nonflatcosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to TestFLRW
# against non-flat Cosmology
assert not cosmo.is_equivalent(nonflatcosmo)
assert not nonflatcosmo.is_equivalent(cosmo)
# non-flat version of class
nonflat_cosmo_cls = cosmo.__class__.mro()[3]
# keys check in `test_is_equivalent_nonflat_class_different_params`
# non-flat
nonflat = nonflat_cosmo_cls(*self.cls_args, Ode0=0.9, **self.cls_kwargs)
assert not nonflat.is_equivalent(cosmo)
assert not cosmo.is_equivalent(nonflat)
# flat, but not FlatFLRWMixin
flat = nonflat_cosmo_cls(*self.cls_args,
Ode0=1.0 - cosmo.Om0 - cosmo.Ogamma0 - cosmo.Onu0,
**self.cls_kwargs)
flat._Ok0 = 0.0
assert flat.is_equivalent(cosmo)
assert cosmo.is_equivalent(flat)
def test_repr(self, cosmo_cls, cosmo):
"""
Test method ``.__repr__()``. Skip non-flat superclass test.
e.g. `TestFlatLambdaCDM` -> `FlatFLRWMixinTest`
vs `TestFlatLambdaCDM` -> `TestLambdaCDM` -> `FlatFLRWMixinTest`
"""
FLRWSubclassTest.test_repr(self, cosmo_cls, cosmo)
# test eliminated Ode0 from parameters
assert "Ode0" not in repr(cosmo)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
import os
import sys
from .version import version as __version__
def _is_astropy_source(path=None):
"""
Returns whether the source for this module is directly in an astropy
source distribution or checkout.
"""
# If this __init__.py file is in ./astropy/ then the import is within a source
# dir. The .astropy-root file is distributed with the source, but it should
# not be installed.
if path is None:
path = os.path.join(os.path.dirname(__file__), os.pardir)
elif os.path.isfile(path):
path = os.path.dirname(path)
source_dir = os.path.abspath(path)
return os.path.exists(os.path.join(source_dir, '.astropy-root'))
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
if 'dev' in __version__:
online_docs_root = 'https://docs.astropy.org/en/latest/'
else:
online_docs_root = f'https://docs.astropy.org/en/{__version__}/'
from . import config as _config # noqa: E402
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
'When True, use Unicode characters when outputting values, and '
'displaying widgets at the console.')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when writing to the console.',
aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR'])
max_lines = _config.ConfigItem(
None,
description='Maximum number of lines in the display of pretty-printed '
'objects. If not provided, try to determine automatically from the '
'terminal size. Negative numbers mean no limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_lines'])
max_width = _config.ConfigItem(
None,
description='Maximum number of characters per line in the display of '
'pretty-printed objects. If not provided, try to determine '
'automatically from the terminal size. Negative numbers mean no '
'limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_width'])
conf = Conf()
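# A minimal usage sketch (illustrative, not part of this module): the items
# above can be read or overridden at runtime through the ``conf`` instance,
#
#     import astropy
#     astropy.conf.max_lines = 30        # limit pretty-printed output
#     print(astropy.conf.unicode_output)
#
# or set persistently in the astropy configuration file.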
# Define a base ScienceState for configuring constants and units
from .utils.state import ScienceState # noqa: E402
class base_constants_version(ScienceState):
"""
Base class for the real version-setters below
"""
_value = 'test'
_versions = dict(test='test')
@classmethod
def validate(cls, value):
if value not in cls._versions:
raise ValueError(f'Must be one of {list(cls._versions.keys())}')
return cls._versions[value]
@classmethod
def set(cls, value):
"""
Set the current constants value.
"""
import sys
if 'astropy.units' in sys.modules:
raise RuntimeError('astropy.units is already imported')
if 'astropy.constants' in sys.modules:
raise RuntimeError('astropy.constants is already imported')
return super().set(value)
class physical_constants(base_constants_version):
"""
The version of physical constants to use
"""
# Maintainers: update when new constants are added
_value = 'codata2018'
_versions = dict(codata2018='codata2018', codata2014='codata2014',
codata2010='codata2010', astropyconst40='codata2018',
astropyconst20='codata2014', astropyconst13='codata2010')
class astronomical_constants(base_constants_version):
"""
The version of astronomical constants to use
"""
# Maintainers: update when new constants are added
_value = 'iau2015'
_versions = dict(iau2015='iau2015', iau2012='iau2012',
astropyconst40='iau2015', astropyconst20='iau2015',
astropyconst13='iau2012')
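# A minimal usage sketch (illustrative only): because ``set`` refuses to run
# once astropy.units or astropy.constants has been imported (enforced in
# ``base_constants_version.set`` above), an alternative constants version has
# to be selected right after importing astropy, e.g.
#
#     import astropy
#     astropy.physical_constants.set('codata2014')
#     astropy.astronomical_constants.set('iau2012')
#     import astropy.constants as const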
# Create the test() function
from .tests.runner import TestRunner # noqa: E402
test = TestRunner.make_test_runner_in(__path__[0]) # noqa: F821
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
try:
from .utils import _compiler # noqa: F401
except ImportError:
if _is_astropy_source():
raise ImportError('You appear to be trying to import astropy from '
'within a source checkout or from an editable '
'installation without building the extension '
'modules first. Either run:\n\n'
' pip install -e .\n\nor\n\n'
' python setup.py build_ext --inplace\n\n'
'to make sure the extension modules are built ')
else:
# Outright broken installation, just raise standard error
raise
# Set the bibtex entry to the article referenced in CITATION.
def _get_bibtex():
citation_file = os.path.join(os.path.dirname(__file__), 'CITATION')
with open(citation_file) as citation:
refs = citation.read().split('@ARTICLE')[1:]
if len(refs) == 0:
return ''
bibtexreference = f'@ARTICLE{refs[0]}'
return bibtexreference
__citation__ = __bibtex__ = _get_bibtex()
from .logger import _init_log, _teardown_log # noqa: E402, F401
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page # noqa: E402, F401
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
import webbrowser
from urllib.parse import urlencode
version = __version__
if 'dev' in version:
version = 'latest'
else:
version = 'v' + version
url = f"https://docs.astropy.org/en/{version}/search.html?{urlencode({'q': query})}"
webbrowser.open(url)
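# A minimal usage sketch (illustrative only):
#
#     from astropy import online_help
#     online_help('SkyCoord')   # opens the documentation search results page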
__dir_inc__ = ['__version__', '__githash__',
'__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
'online_docs_root', 'conf', 'physical_constants',
'astronomical_constants']
from types import ModuleType as __module_type__ # noqa: E402
# Clean up top-level namespace--delete everything that isn't in __dir_inc__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __dir_inc__ or
(varname[0] != '_' and
isinstance(locals()[varname], __module_type__) and
locals()[varname].__name__.startswith(__name__ + '.'))):
# The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory.
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# See sphinx_astropy.conf for which values are set there.
import os
import sys
import configparser
from datetime import datetime
from importlib import metadata
import doctest
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
# -- Check for missing dependencies -------------------------------------------
missing_requirements = {}
for line in metadata.requires('astropy'):
if 'extra == "docs"' in line:
req = Requirement(line.split(';')[0])
req_package = req.name.lower()
req_specifier = str(req.specifier)
try:
version = metadata.version(req_package)
except metadata.PackageNotFoundError:
missing_requirements[req_package] = req_specifier
continue  # nothing installed to check against the version specifier below
if version not in SpecifierSet(req_specifier, prereleases=True):
missing_requirements[req_package] = req_specifier
if missing_requirements:
print('The following packages could not be found and are required to '
'build the documentation:')
for key, val in missing_requirements.items():
print(f' * {key} {val}')
print('Please install the "docs" requirements.')
sys.exit(1)
from sphinx_astropy.conf.v1 import * # noqa
# -- Plot configuration -------------------------------------------------------
plot_rcparams = {}
plot_rcparams['figure.figsize'] = (6, 6)
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_rcparams['axes.labelsize'] = 'large'
plot_rcparams['figure.subplot.hspace'] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("X.Y.Z")` here.
check_sphinx_version("1.2.1") # noqa: F405
# The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for
# the benefit of other packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy'] # noqa: F405
# add any custom intersphinx for astropy
intersphinx_mapping['astropy-dev'] = ('https://docs.astropy.org/en/latest/', None) # noqa: F405
intersphinx_mapping['pyerfa'] = ('https://pyerfa.readthedocs.io/en/stable/', None) # noqa: F405
intersphinx_mapping['pytest'] = ('https://docs.pytest.org/en/stable/', None) # noqa: F405
intersphinx_mapping['ipython'] = ('https://ipython.readthedocs.io/en/stable/', None) # noqa: F405
intersphinx_mapping['pandas'] = ('https://pandas.pydata.org/pandas-docs/stable/', None) # noqa: F405, E501
intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None) # noqa: F405, E501
intersphinx_mapping['packagetemplate'] = ('https://docs.astropy.org/projects/package-template/en/latest/', None) # noqa: F405, E501
intersphinx_mapping['h5py'] = ('https://docs.h5py.org/en/stable/', None) # noqa: F405
intersphinx_mapping['asdf-astropy'] = ('https://asdf-astropy.readthedocs.io/en/latest/', None) # noqa: F405
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates') # noqa: F405
exclude_patterns.append('changes') # noqa: F405
exclude_patterns.append('_pkgtemplate.rst') # noqa: F405
exclude_patterns.append('**/*.inc.rst') # .inc.rst mean *include* files, don't have sphinx process them # noqa: F405, E501
# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append('_templates')
extensions += ["sphinx_changelog"] # noqa: F405
# Grab minversion from setup.cfg
setup_cfg = configparser.ConfigParser()
setup_cfg.read(os.path.join(os.path.pardir, 'setup.cfg'))
__minimum_python_version__ = setup_cfg['options']['python_requires'].replace('>=', '')
project = 'Astropy'
min_versions = {}
for line in metadata.requires('astropy'):
req = Requirement(line.split(';')[0])
min_versions[req.name.lower()] = str(req.specifier)
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
with open("common_links.txt") as cl:
rst_epilog += cl.read().format(minimum_python=__minimum_python_version__,
**min_versions)
# Manually register doctest options since matplotlib 3.5 messed up allowing them
# from pytest-doctestplus
IGNORE_OUTPUT = doctest.register_optionflag('IGNORE_OUTPUT')
REMOTE_DATA = doctest.register_optionflag('REMOTE_DATA')
FLOAT_CMP = doctest.register_optionflag('FLOAT_CMP')
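# These registered flags correspond to doctest directives used throughout the
# docs, e.g. (illustrative example only):
#
#     >>> 0.1 + 0.2  # doctest: +FLOAT_CMP
#     0.3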
# Whether to create cross-references for the parameter types in the
# Parameters, Other Parameters, Returns and Yields sections of the docstring.
numpydoc_xref_param_type = True
# Words not to cross-reference. Most likely, these are common words used in
# parameter type descriptions that may be confused for classes of the same
# name. The base set comes from sphinx-astropy. We add more here.
numpydoc_xref_ignore.update({
"mixin",
"Any", # aka something that would be annotated with `typing.Any`
# needed in subclassing numpy # TODO! revisit
"Arguments", "Path",
# TODO! not need to ignore.
"flag", "bits",
})
# Mappings to fully qualified paths (or correct ReST references) for the
# aliases/shortcuts used when specifying the types of parameters.
# Numpy provides some defaults
# https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94
# and a base set comes from sphinx-astropy.
# so here we mostly need to define Astropy-specific x-refs
numpydoc_xref_aliases.update({
# python & adjacent
"Any": "`~typing.Any`",
"file-like": ":term:`python:file-like object`",
"file": ":term:`python:file object`",
"path-like": ":term:`python:path-like object`",
"module": ":term:`python:module`",
"buffer-like": ":term:buffer-like",
"hashable": ":term:`python:hashable`",
# for matplotlib
"color": ":term:`color`",
# for numpy
"ints": ":class:`python:int`",
# for astropy
"number": ":term:`number`",
"Representation": ":class:`~astropy.coordinates.BaseRepresentation`",
"writable": ":term:`writable file-like object`",
"readable": ":term:`readable file-like object`",
"BaseHDU": ":doc:`HDU </io/fits/api/hdus>`"
})
# Add from sphinx-astropy 1) glossary aliases 2) physical types.
numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases)
# -- Project information ------------------------------------------------------
author = 'The Astropy Developers'
copyright = f'2011–{datetime.utcnow().year}, ' + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The full version, including alpha/beta/rc tags.
release = metadata.version(project)
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# Only include dev docs in dev version.
dev = 'dev' in release
if not dev:
exclude_patterns.append('development/*') # noqa: F405
exclude_patterns.append('testhelpers.rst') # noqa: F405
# -- Options for the module index ---------------------------------------------
modindex_common_prefix = ['astropy.']
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
# html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
# html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
# html_theme = None
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f'{project} v{release}'
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
'to_be_indexed': ['stable', 'latest'],
'is_development': dev
}
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + ' Documentation',
author, 'manual')]
latex_logo = '_static/astropy_logo.pdf'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + ' Documentation',
[author], 1)]
# Setting this URL is required by sphinx-astropy
github_issues_url = 'https://github.com/astropy/astropy/issues/'
edit_on_github_branch = 'main'
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
# The exceptions are not maintained here by hand; see the docs/nitpick-exceptions
# file for the actual listing, which is loaded below.
nitpick_ignore = []
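# Each non-blank, non-comment line of that file is expected to hold a
# whitespace-separated (type, target) pair, matching the parsing below, e.g.
# (hypothetical entries):
#
#     py:class numpy.ndarray
#     py:obj astropy.table.Row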
for line in open('nitpick-exceptions'):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, target))
# -- Options for the Sphinx gallery -------------------------------------------
try:
import warnings
import sphinx_gallery # noqa: F401
extensions += ["sphinx_gallery.gen_gallery"] # noqa: F405
sphinx_gallery_conf = {
'backreferences_dir': 'generated/modules', # path to store the module using example template # noqa: E501
'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_" # noqa: E501
'examples_dirs': f'..{os.sep}examples', # path to the examples scripts
'gallery_dirs': 'generated/examples', # path to save gallery generated examples
'reference_url': {
'astropy': None,
'matplotlib': 'https://matplotlib.org/stable/',
'numpy': 'https://numpy.org/doc/stable/',
},
'abort_on_example_error': True
}
# Filter out backend-related warnings as described in
# https://github.com/sphinx-gallery/sphinx-gallery/pull/564
warnings.filterwarnings("ignore", category=UserWarning,
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
except ImportError:
sphinx_gallery = None
# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = ['https://journals.aas.org/manuscript-preparation/',
'https://maia.usno.navy.mil/',
'https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer',
'https://aa.usno.navy.mil/publications/docs/Circular_179.php',
'http://data.astropy.org',
'https://doi.org/10.1017/S0251107X00002406', # internal server error
'https://doi.org/10.1017/pasa.2013.31', # internal server error
'https://pyfits.readthedocs.io/en/v3.2.1/', # defunct page in CHANGES.rst
r'https://github\.com/astropy/astropy/(?:issues|pull)/\d+']
linkcheck_timeout = 180
linkcheck_anchors = False
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['robots.txt']
def rstjinja(app, docname, source):
"""Render pages as a jinja template to hide/show dev docs. """
# Make sure we're outputting HTML
if app.builder.format != 'html':
return
files_to_render = ["index", "install"]
if docname in files_to_render:
print(f"Jinja rendering {docname}")
rendered = app.builder.templates.render_string(
source[0], app.config.html_context)
source[0] = rendered
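# A hypothetical illustration of what the Jinja step above enables: the pages
# listed in ``files_to_render`` can branch on values from ``html_context``,
# for example
#
#     {% if is_development %}
#     ... content shown only in the development docs ...
#     {% endif %}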
def resolve_astropy_and_dev_reference(app, env, node, contnode):
"""
Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases.
Documentation links in astropy can be set up as intersphinx links so that
affiliate packages do not have to override the docstrings when building
the docs.
If we are building the development docs it is a local ref targeting the
label ``astropy-dev:<label>``, but for stable docs it should be an
intersphinx resolution to the development docs.
See https://github.com/astropy/astropy/issues/11366
"""
# should the node be processed?
reftarget = node.get('reftarget') # str or None
if str(reftarget).startswith('astropy:'):
# This allows Astropy to use intersphinx links to itself and have
# them resolve to local links. Downstream packages will see intersphinx.
# TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented.
process, replace = True, 'astropy:'
elif dev and str(reftarget).startswith('astropy-dev:'):
process, replace = True, 'astropy-dev:'
else:
process, replace = False, ''
# make link local
if process:
reftype = node.get('reftype')
refdoc = node.get('refdoc', app.env.docname)
# convert astropy intersphinx targets to local links.
# there are a few types of intersphinx link patterns, as described in
# https://docs.readthedocs.io/en/stable/guides/intersphinx.html
reftarget = reftarget.replace(replace, '')
if reftype == "doc": # also need to replace the doc link
node.replace_attr("reftarget", reftarget)
# Delegate to the ref node's original domain/target (typically :ref:)
try:
domain = app.env.domains[node['refdomain']]
return domain.resolve_xref(app.env, refdoc, app.builder,
reftype, reftarget, node, contnode)
except Exception:
pass
# Otherwise return None which should delegate to intersphinx
def setup(app):
if sphinx_gallery is None:
msg = ('The sphinx_gallery extension is not installed, so the '
'gallery will not be built. You will probably see '
'additional warnings about undefined references due '
'to this.')
try:
app.warn(msg)
except AttributeError:
# Sphinx 1.6+
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.warning(msg)
# Generate the page from Jinja template
app.connect("source-read", rstjinja)
# Set this to higher priority than intersphinx; this way when building
# dev docs astropy-dev: targets will go to the local docs instead of the
# intersphinx mapping
app.connect("missing-reference", resolve_astropy_and_dev_reference,
priority=400)
"""
========================
Title of Example
========================
This example <verb> <active tense> <does something>.
The example uses <packages> to <do something> and <other package> to <do other
thing>. Include links to referenced packages like this: `astropy.io.fits` to
show 'astropy.io.fits', or like this `~astropy.io.fits` to show just 'fits'.
*By: <names>*
*License: BSD*
"""
##############################################################################
# Make print work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
# uncomment if including figures:
# import matplotlib.pyplot as plt
# from astropy.visualization import astropy_mpl_style
# plt.style.use(astropy_mpl_style)
##############################################################################
# This code block is executed, although it produces no output. Lines starting
# with a simple hash are code comments and get treated as part of the code
# block. To include this new comment string we started the new block with a
# long line of hashes.
#
# The sphinx-gallery parser will assume everything after this splitter, and that
# continues to start with a **comment hash and space** (respecting code style),
# is text that has to be rendered in HTML format. Keep your comment lines
# together under the comment hashes: to break a paragraph you still need to
# comment that blank line.
#
# In this example the next block of code produces some plottable data. The code
# is executed, the figure is saved, and then the code is presented, followed by
# the inlined figure.
x = np.linspace(-np.pi, np.pi, 300)
xx, yy = np.meshgrid(x, x)
z = np.cos(xx) + np.cos(yy)
plt.figure()
plt.imshow(z)
plt.colorbar()
plt.xlabel('$x$')
plt.ylabel('$y$')
###########################################################################
# Again it is possible to continue the discussion with a new comment block. This
# time it introduces the next code block, which generates 2 separate figures.
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('hot'))
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('Spectral'), interpolation='none')
##########################################################################
# There are some subtle differences between rendered HTML comment strings and
# code comment strings, which I'll demonstrate below. (Some of this
# only makes sense if you look at the
# :download:`raw Python script <plot_notebook.py>`)
#
# Comments in comment blocks remain nested in the text.
def dummy():
"""Dummy function to make sure docstrings don't get rendered as text"""
pass
# Code comments not preceded by the hash splitter are left in code blocks.
string = """
Triple-quoted string which tries to break parser but doesn't.
"""
############################################################################
# Output of the script is captured:
print('Some output from Python')
############################################################################
# Finally, I'll call ``show`` at the end just so someone running the Python
# code directly will see the plots; this is not necessary for creating the docs
plt.show()
"""
========================================================================
Transforming positions and velocities to and from a Galactocentric frame
========================================================================
This document shows a few examples of how to use and customize the
`~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky
positions, distance, proper motions, and radial velocities to a Galactocentric,
Cartesian frame, and the same in reverse.
The main configurable parameters of the `~astropy.coordinates.Galactocentric`
frame control the position and velocity of the solar system barycenter within
the Galaxy. These are specified by setting the ICRS coordinates of the
Galactic center, the distance to the Galactic center (the sun-galactic center
line is always assumed to be the x-axis of the Galactocentric frame), and the
Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first
demonstrate how to customize these values, then show how to set the solar motion
instead by inputting the proper motion of Sgr A*.
Note that, for brevity, we may refer to the solar system barycenter as just "the
sun" in the examples below.
*By: Adrian Price-Whelan*
*License: BSD*
"""
##############################################################################
# Make `print` work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the necessary astropy subpackages
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# Let's first define a barycentric coordinate and velocity in the ICRS frame.
# We'll use the data for the star HD 39881 from the `Simbad
# <https://simbad.u-strasbg.fr/simbad/>`_ database:
c1 = coord.SkyCoord(ra=89.014303*u.degree, dec=13.924912*u.degree,
distance=(37.59*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=372.72*u.mas/u.yr,
pm_dec=-483.69*u.mas/u.yr,
radial_velocity=0.37*u.km/u.s,
frame='icrs')
##############################################################################
# This is a high proper-motion star; suppose we'd like to transform its position
# and velocity to a Galactocentric frame to see if it has a large 3D velocity
# as well. To use the Astropy default solar position and motion parameters, we
# can simply do:
gc1 = c1.transform_to(coord.Galactocentric)
##############################################################################
# From here, we can access the components of the resulting
# `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian
# velocity components:
print(gc1.v_x, gc1.v_y, gc1.v_z)
##############################################################################
# The default parameters for the `~astropy.coordinates.Galactocentric` frame
# are detailed in the linked documentation, but we can modify the most commonly
# changed values using the keywords ``galcen_distance``, ``galcen_v_sun``, and
# ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector
# of the sun, and the height of the sun above the Galactic midplane,
# respectively. The velocity of the sun can be specified as an
# `~astropy.units.Quantity` object with velocity units and is interpreted as a
# Cartesian velocity, as in the example below. Note that, as with the positions,
# the Galactocentric frame is a right-handed system (i.e., the Sun is at negative
# x values) so ``v_x`` is opposite of the Galactocentric radial velocity:
v_sun = [11.1, 244, 7.25] * (u.km / u.s) # [vx, vy, vz]
gc_frame = coord.Galactocentric(
galcen_distance=8*u.kpc,
galcen_v_sun=v_sun,
z_sun=0*u.pc)
##############################################################################
# We can then transform to this frame instead, with our custom parameters:
gc2 = c1.transform_to(gc_frame)
print(gc2.v_x, gc2.v_y, gc2.v_z)
##############################################################################
# It's sometimes useful to specify the solar motion using the `proper motion
# of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian
# velocity components. With an assumed distance, we can convert proper motion
# components to Cartesian velocity components using `astropy.units`:
galcen_distance = 8*u.kpc
pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr # from Reid & Brunthaler 2004
vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles())
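##############################################################################
# (This is just the small-angle relation v = d * mu; the
# ``u.dimensionless_angles()`` equivalency lets the implicit radian be dropped
# when converting to km/s.)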
##############################################################################
# We still have to assume a line-of-sight velocity for the Galactic center,
# which we will again take to be 11 km/s:
vx = 11.1 * u.km/u.s
v_sun2 = u.Quantity([vx, vy, vz]) # List of Quantity -> a single Quantity
gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance,
galcen_v_sun=v_sun2,
z_sun=0*u.pc)
gc3 = c1.transform_to(gc_frame2)
print(gc3.v_x, gc3.v_y, gc3.v_z)
##############################################################################
# The transformations also work in the opposite direction. This can be useful
# for transforming simulated or theoretical data to observable quantities. As
# an example, we'll generate 4 theoretical circular orbits at different
# Galactocentric radii with the same circular velocity, and transform them to
# Heliocentric coordinates:
ring_distances = np.arange(10, 25+1, 5) * u.kpc
circ_velocity = 220 * u.km/u.s
phi_grid = np.linspace(90, 270, 512) * u.degree # grid of azimuths
ring_rep = coord.CylindricalRepresentation(
rho=ring_distances[:,np.newaxis],
phi=phi_grid[np.newaxis],
z=np.zeros_like(ring_distances)[:,np.newaxis])
angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr,
u.dimensionless_angles())
ring_dif = coord.CylindricalDifferential(
d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s,
d_phi=angular_velocity[:,np.newaxis],
d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s
)
ring_rep = ring_rep.with_differentials(ring_dif)
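##############################################################################
# (The angular velocity used above is simply omega = -circ_velocity /
# ring_distances, expressed in mas/yr with the ``u.dimensionless_angles()``
# equivalency; the minus sign makes the rings orbit in the same sense as the
# Galactic disk rotation.)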
gc_rings = coord.SkyCoord(ring_rep, frame=coord.Galactocentric)
##############################################################################
# First, let's visualize the geometry in Galactocentric coordinates. Here are
# the positions and velocities of the rings; note that in the velocity plot,
# the velocities of the 4 rings are identical and thus overlaid under the same
# curve:
fig,axes = plt.subplots(1, 2, figsize=(12,6))
# Positions
axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3)
axes[0].text(-8., 0, r'$\odot$', fontsize=20)
axes[0].set_xlim(-30, 30)
axes[0].set_ylim(-30, 30)
axes[0].set_xlabel('$x$ [kpc]')
axes[0].set_ylabel('$y$ [kpc]')
# Velocities
axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3)
axes[1].set_xlim(-250, 250)
axes[1].set_ylim(-250, 250)
axes[1].set_xlabel(f"$v_x$ [{(u.km / u.s).to_string('latex_inline')}]")
axes[1].set_ylabel(f"$v_y$ [{(u.km / u.s).to_string('latex_inline')}]")
fig.tight_layout()
plt.show()
##############################################################################
# Now we can transform to Galactic coordinates and visualize the rings in
# observable coordinates:
gal_rings = gc_rings.transform_to(coord.Galactic)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
for i in range(len(ring_distances)):
ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value,
label=str(ring_distances[i]), marker='None', linewidth=3)
ax.set_xlim(360, 0)
ax.set_xlabel('$l$ [deg]')
ax.set_ylabel(fr'$\mu_l \, \cos b$ [{(u.mas/u.yr).to_string("latex_inline")}]')
ax.legend()
plt.show()
"""
===================================================================
Determining and plotting the altitude/azimuth of a celestial object
===================================================================
This example demonstrates coordinate transformations and the creation of
visibility curves to assist with observing run planning.
In this example, we make a `~astropy.coordinates.SkyCoord` instance for M33.
The altitude-azimuth coordinates are then found using
`astropy.coordinates.EarthLocation` and `astropy.time.Time` objects.
This example is meant to demonstrate the capabilities of the
`astropy.coordinates` package. For more convenient and/or complex observation
planning, consider the `astroplan <https://astroplan.readthedocs.org/>`_
package.
*By: Erik Tollerud, Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Let's suppose you are planning to visit picturesque Bear Mountain State Park
# in New York, USA. You're bringing your telescope with you (of course), and
# someone told you M33 is a great target to observe there. You happen to know
# you're free at 11:00 pm local time, and you want to know if it will be up.
# Astropy can answer that.
#
# Import numpy and matplotlib. For the latter, use a nicer set of plot
# parameters and set up support for plotting/converting quantities.
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style, quantity_support
plt.style.use(astropy_mpl_style)
quantity_support()
##############################################################################
# Import the packages necessary for finding coordinates and making
# coordinate transformations
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
##############################################################################
# `astropy.coordinates.SkyCoord.from_name` uses Simbad to resolve object
# names and retrieve coordinates.
#
# Get the coordinates of M33:
m33 = SkyCoord.from_name('M33')
##############################################################################
# Use `astropy.coordinates.EarthLocation` to provide the location of Bear
# Mountain and set the time to 11pm EDT on 2012 July 12:
bear_mountain = EarthLocation(lat=41.3*u.deg, lon=-74*u.deg, height=390*u.m)
utcoffset = -4*u.hour # Eastern Daylight Time
time = Time('2012-7-12 23:00:00') - utcoffset
##############################################################################
# `astropy.coordinates.EarthLocation.get_site_names` and
# `~astropy.coordinates.EarthLocation.of_site` can be used to get
# locations of major observatories.
#
# Use `astropy.coordinates` to find the Alt, Az coordinates of M33 as
# observed from Bear Mountain at 11pm on 2012 July 12.
m33altaz = m33.transform_to(AltAz(obstime=time,location=bear_mountain))
print(f"M33's Altitude = {m33altaz.alt:.2}")
##############################################################################
# This is helpful since it turns out M33 is barely above the horizon at this
# time. It's more informative to find M33's airmass over the course of
# the night.
#
# Find the alt,az coordinates of M33 at 100 times evenly spaced between 10pm
# and 7am EDT:
midnight = Time('2012-7-13 00:00:00') - utcoffset
delta_midnight = np.linspace(-2, 10, 100)*u.hour
frame_July13night = AltAz(obstime=midnight+delta_midnight,
location=bear_mountain)
m33altazs_July13night = m33.transform_to(frame_July13night)
##############################################################################
# convert alt, az to airmass with `~astropy.coordinates.AltAz.secz` attribute:
m33airmasss_July13night = m33altazs_July13night.secz
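##############################################################################
# (``secz`` is the secant of the zenith angle; for a plane-parallel atmosphere
# the airmass is approximately sec(z), a good approximation away from the
# horizon.)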
##############################################################################
# Plot the airmass as a function of time:
plt.plot(delta_midnight, m33airmasss_July13night)
plt.xlim(-2, 10)
plt.ylim(1, 4)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Airmass [Sec(z)]')
plt.show()
##############################################################################
# Use `~astropy.coordinates.get_sun` to find the location of the Sun at 1000
# evenly spaced times between noon on July 12 and noon on July 13:
from astropy.coordinates import get_sun
delta_midnight = np.linspace(-12, 12, 1000)*u.hour
times_July12_to_13 = midnight + delta_midnight
frame_July12_to_13 = AltAz(obstime=times_July12_to_13, location=bear_mountain)
sunaltazs_July12_to_13 = get_sun(times_July12_to_13).transform_to(frame_July12_to_13)
##############################################################################
# Do the same with `~astropy.coordinates.get_moon` to find when the moon is
# up. Be aware that this will need to download a 10MB file from the internet
# to get a precise location of the moon.
from astropy.coordinates import get_moon
moon_July12_to_13 = get_moon(times_July12_to_13)
moonaltazs_July12_to_13 = moon_July12_to_13.transform_to(frame_July12_to_13)
##############################################################################
# Find the alt,az coordinates of M33 at those same times:
m33altazs_July12_to_13 = m33.transform_to(frame_July12_to_13)
##############################################################################
# Make a beautiful figure illustrating nighttime and the altitudes of M33 and
# the Sun over that time:
plt.plot(delta_midnight, sunaltazs_July12_to_13.alt, color='r', label='Sun')
plt.plot(delta_midnight, moonaltazs_July12_to_13.alt, color=[0.75]*3, ls='--', label='Moon')
plt.scatter(delta_midnight, m33altazs_July12_to_13.alt,
c=m33altazs_July12_to_13.az, label='M33', lw=0, s=8,
cmap='viridis')
plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg,
sunaltazs_July12_to_13.alt < -0*u.deg, color='0.5', zorder=0)
plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg,
sunaltazs_July12_to_13.alt < -18*u.deg, color='k', zorder=0)
plt.colorbar().set_label('Azimuth [deg]')
plt.legend(loc='upper left')
plt.xlim(-12*u.hour, 12*u.hour)
plt.xticks((np.arange(13)*2-12)*u.hour)
plt.ylim(0*u.deg, 90*u.deg)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Altitude [deg]')
plt.show()
"""
================================================================
Convert a radial velocity to the Galactic Standard of Rest (GSR)
================================================================
Radial or line-of-sight velocities of sources are often reported in a
Heliocentric or Solar-system barycentric reference frame. A common
transformation incorporates the projection of the Sun's motion along the
line-of-sight to the target, hence transforming it to a Galactic rest frame
instead (sometimes referred to as the Galactic Standard of Rest, GSR). This
transformation depends on the assumptions about the orientation of the Galactic
frame relative to the bary- or Heliocentric frame. It also depends on the
assumed solar velocity vector. Here we'll demonstrate how to perform this
transformation using a sky position and barycentric radial-velocity.
*By: Adrian Price-Whelan*
*License: BSD*
"""
################################################################################
# Make print work the same in all versions of Python and import the required
# Astropy packages:
import astropy.units as u
import astropy.coordinates as coord
################################################################################
# Use the latest convention for the Galactocentric coordinates
coord.galactocentric_frame_defaults.set('latest')
################################################################################
# For this example, let's work with the coordinates and barycentric radial
# velocity of the star HD 155967, as obtained from
# `Simbad <https://simbad.u-strasbg.fr/simbad/>`_:
icrs = coord.SkyCoord(ra=258.58356362*u.deg, dec=14.55255619*u.deg,
radial_velocity=-16.1*u.km/u.s, frame='icrs')
################################################################################
# We next need to decide on the velocity of the Sun in the assumed GSR frame.
# We'll use the same velocity vector as used in the
# `~astropy.coordinates.Galactocentric` frame, and convert it to a
# `~astropy.coordinates.CartesianRepresentation` object using the
# ``.to_cartesian()`` method of the
# `~astropy.coordinates.CartesianDifferential` object ``galcen_v_sun``:
v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian()
################################################################################
# We now need to get a unit vector in the assumed Galactic frame from the sky
# position in the ICRS frame above. We'll use this unit vector to project the
# solar velocity onto the line-of-sight:
gal = icrs.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
################################################################################
# Now we project the solar velocity using this unit vector:
v_proj = v_sun.dot(unit_vector)
################################################################################
# Finally, we add the projection of the solar velocity to the radial velocity
# to get a GSR radial velocity:
rv_gsr = icrs.radial_velocity + v_proj
print(rv_gsr)
################################################################################
# We could wrap this in a function so we can control the solar velocity and
# re-use the above code:
def rv_to_gsr(c, v_sun=None):
"""Transform a barycentric radial velocity to the Galactic Standard of Rest
(GSR).
    The input radial velocity must be attached to the input coordinate ``c``
    (e.g., a `~astropy.coordinates.SkyCoord` created with a
    ``radial_velocity`` component).
Parameters
----------
c : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
        The radial velocity, associated with a sky coordinate, to be
transformed.
v_sun : `~astropy.units.Quantity`, optional
The 3D velocity of the solar system barycenter in the GSR frame.
Defaults to the same solar motion as in the
`~astropy.coordinates.Galactocentric` frame.
Returns
-------
v_gsr : `~astropy.units.Quantity`
The input radial velocity transformed to a GSR frame.
"""
if v_sun is None:
v_sun = coord.Galactocentric().galcen_v_sun.to_cartesian()
gal = c.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
v_proj = v_sun.dot(unit_vector)
return c.radial_velocity + v_proj
rv_gsr = rv_to_gsr(icrs)
print(rv_gsr)
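################################################################################
# As a hypothetical variation (the numbers below are purely illustrative, not a
# recommended solar-motion value), the ``v_sun`` argument lets us supply a
# different solar velocity vector as a
# `~astropy.coordinates.CartesianRepresentation`:
v_sun_alt = coord.CartesianRepresentation([11.1, 244., 7.25] * u.km / u.s)
print(rv_to_gsr(icrs, v_sun=v_sun_alt))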
|
05aa2b55ee2905abe731867c7c027d8fd21dc5e6b04c605e07045a7501508441 | r"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy:astropy-coordinates-design` and
the docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example,
we will define a coordinate system defined by the plane of orbit of the
Sagittarius Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003).
The Sgr coordinate system is often referred to in terms of two angular
coordinates, :math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page https://www.stsci.edu/~dlaw/Sgr/
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
https://ui.adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
https://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `~astropy.coordinates.Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `~astropy.coordinates.Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `~astropy.units.Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : `~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : `~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta``
        must also be given).
radial_velocity : `~astropy.units.Quantity`, optional, keyword-only
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')]
}
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
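##############################################################################
# As a quick sanity check (optional), verify that the combined matrix is
# orthogonal, so that its transpose is also its inverse:
assert np.allclose(SGR_MATRIX @ matrix_transpose(SGR_MATRIX), np.eye(3))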
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.SkyCoord(280.161732*u.degree, 11.91934*u.degree, frame='icrs')
sgr = icrs.transform_to(Sagittarius)
print(sgr)
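##############################################################################
# As a quick consistency check (optional), transform back to ICRS and confirm
# that we recover the original position to within numerical precision:
print(sgr.transform_to(coord.ICRS).separation(icrs).to(u.arcsec))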
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian, frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr,
frame='sagittarius')
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(
fr"$\mu_\Lambda \, \cos B$ [{sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')}]")
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(
fr"$\mu_\alpha \, \cos\delta$ [{icrs.pm_ra_cosdec.unit.to_string('latex_inline')}]")
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(
fr"$\mu_\delta$ [{icrs.pm_dec.unit.to_string('latex_inline')}]")
plt.show()
|
8642e8dc8eaf9a057bc6aedc7aa2de06ae2e6ebddb22b76ea5bf79edabe62129 | """
=====================================================
Create a multi-extension FITS (MEF) file from scratch
=====================================================
This example demonstrates how to create a multi-extension FITS (MEF)
file from scratch using `astropy.io.fits`.
*By: Erik Bray*
*License: BSD*
"""
import os
##############################################################################
# HDUList objects are used to hold all the HDUs in a FITS file. This
# ``HDUList`` class is a subclass of Python's builtin `list` and can be
# created from scratch. For example, to build up a new FITS file by
# appending HDUs one at a time:
from astropy.io import fits
new_hdul = fits.HDUList()
new_hdul.append(fits.ImageHDU())
new_hdul.append(fits.ImageHDU())
##############################################################################
# Write out the new file to disk:
new_hdul.writeto('test.fits')
##############################################################################
# Alternatively, the HDU instances can be created first (or read from an
# existing FITS file).
#
# Create a multi-extension FITS file with an empty PRIMARY HDU and an empty
# IMAGE extension (here the PRIMARY HDU is created explicitly; we use
# ``overwrite=True`` to overwrite the file if it already exists):
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
new_hdul = fits.HDUList([hdu1, hdu2])
new_hdul.writeto('test.fits', overwrite=True)
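##############################################################################
# As a quick check (optional), display the structure of the file we just
# wrote; it should show a PRIMARY HDU followed by a single IMAGE extension:
fits.info('test.fits')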
##############################################################################
# Finally, we'll remove the file we created:
os.remove('test.fits')
|
b89c61fff37b9bda317b3e4695e3a2145befddb5f218bcf1e5fb25837755c03e | """
==================
Edit a FITS header
==================
This example describes how to edit a value in a FITS header
using `astropy.io.fits`.
*By: Adrian Price-Whelan*
*License: BSD*
"""
from astropy.io import fits
##############################################################################
# Download a FITS file:
from astropy.utils.data import get_pkg_data_filename
fits_file = get_pkg_data_filename('tutorials/FITS-Header/input_file.fits')
##############################################################################
# Look at contents of the FITS file
fits.info(fits_file)
##############################################################################
# Look at the headers of the two extensions:
print("Before modifications:")
print()
print("Extension 0:")
print(repr(fits.getheader(fits_file, 0)))
print()
print("Extension 1:")
print(repr(fits.getheader(fits_file, 1)))
##############################################################################
# `astropy.io.fits` provides an object-oriented interface for reading and
# interacting with FITS files, but for small operations (like this example) it
# is often easier to use the
# `convenience functions <https://docs.astropy.org/en/latest/io/fits/index.html#convenience-functions>`_.
#
# To edit a single header value in the header for extension 0, use the
# `~astropy.io.fits.setval()` function. For example, set the OBJECT keyword
# to 'M31':
fits.setval(fits_file, 'OBJECT', value='M31')
##############################################################################
# With no extra arguments, this will modify the header for extension 0, but
# this can be changed using the ``ext`` keyword argument. For example, we can
# specify extension 1 instead:
fits.setval(fits_file, 'OBJECT', value='M31', ext=1)
##############################################################################
# This can also be used to create a new keyword-value pair ("card" in FITS
# lingo):
fits.setval(fits_file, 'ANEWKEY', value='some value')
##############################################################################
# Again, this is useful for one-off modifications, but can be inefficient
# for operations like editing multiple headers in the same file, because
# `~astropy.io.fits.setval()` opens and re-parses the file each time it
# is called. To make several modifications, it's better to open the file once:
with fits.open(fits_file, 'update') as f:
for hdu in f:
hdu.header['OBJECT'] = 'CAT'
print("After modifications:")
print()
print("Extension 0:")
print(repr(fits.getheader(fits_file, 0)))
print()
print("Extension 1:")
print(repr(fits.getheader(fits_file, 1)))
|
8e3a662ea20c769c5879616b0333356d69efb42bf12e301e65f237e4a910bfab | """
=======================================
Read and plot an image from a FITS file
=======================================
This example opens an image stored in a FITS file and displays it to the screen.
This example uses `astropy.utils.data` to download the file, `astropy.io.fits` to open
the file, and `matplotlib.pyplot` to display the image.
*By: Lia R. Corrales, Adrian Price-Whelan, Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Download the example FITS files used by this example:
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
image_file = get_pkg_data_filename('tutorials/FITS-images/HorseHead.fits')
##############################################################################
# Use `astropy.io.fits.info()` to display the structure of the file:
fits.info(image_file)
##############################################################################
# Generally the image information is located in the Primary HDU, also known
# as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image
# data from this first extension using the keyword argument ``ext=0``:
image_data = fits.getdata(image_file, ext=0)
##############################################################################
# The data is now stored as a 2D numpy array. Print the dimensions using the
# shape attribute:
print(image_data.shape)
##############################################################################
# Display the image data:
plt.figure()
plt.imshow(image_data, cmap='gray')
plt.colorbar()
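##############################################################################
# As an optional extra (a small sketch), print some basic statistics of the
# pixel values:
import numpy as np
print('Min:', np.min(image_data))
print('Max:', np.max(image_data))
print('Mean:', np.mean(image_data))
print('Stdev:', np.std(image_data))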
|
9d0c109debb4f6a76d04580fd4a019af360fe26f188be4264d3bb7ba2c8a2a2f | """
==========================================
Create a very large FITS file from scratch
==========================================
This example demonstrates how to create a large file (larger than will fit in
memory) from scratch using `astropy.io.fits`.
*By: Erik Bray*
*License: BSD*
"""
##############################################################################
# Normally to create a single image FITS file one would do something like:
import os
import numpy as np
from astropy.io import fits
data = np.zeros((40000, 40000), dtype=np.float64)
hdu = fits.PrimaryHDU(data=data)
##############################################################################
# Then use the `astropy.io.fits.writeto()` method to write out the new
# file to disk
hdu.writeto('large.fits')
##############################################################################
# However, a 40000 x 40000 array of doubles is nearly twelve gigabytes! Most
# systems won't be able to create that in memory just to write out to disk. In
# order to create such a large file efficiently requires a little extra work,
# and a few assumptions.
#
# First, it is helpful to anticipate roughly how large the header will be
# (that is, how many keywords it will contain). FITS headers must be written in 2880 byte
# blocks, large enough for 36 keywords per block (including the END keyword in
# the final block). Typical headers have somewhere between 1 and 4 blocks,
# though sometimes more.
#
# Since the first thing we write to a FITS file is the header, we want to write
# enough header blocks so that there is plenty of padding in which to add new
# keywords without having to resize the whole file. Say you want the header to
# use 4 blocks by default. Then, excluding the END card which Astropy will add
# automatically, create the header and pad it out to 36 * 4 cards.
#
# Create a stub array to initialize the HDU; its
# exact size is irrelevant, as long as it has the desired number of
# dimensions
data = np.zeros((100, 100), dtype=np.float64)
hdu = fits.PrimaryHDU(data=data)
header = hdu.header
while len(header) < (36 * 4 - 1):
header.append() # Adds a blank card to the end
##############################################################################
# Now adjust the NAXISn keywords to the desired size of the array, and write
# only the header out to a file. Using the ``hdu.writeto()`` method will cause
# astropy to "helpfully" reset the NAXISn keywords to match the size of the
# dummy array. That is because it works hard to ensure that only valid FITS
# files are written. Instead, we can write just the header to a file using the
# `astropy.io.fits.Header.tofile` method:
header['NAXIS1'] = 40000
header['NAXIS2'] = 40000
header.tofile('large.fits')
##############################################################################
# Finally, grow out the end of the file to match the length of the
# data (plus the length of the header). This can be done very efficiently on
# most systems by seeking past the end of the file and writing a single byte,
# like so:
with open('large.fits', 'rb+') as fobj:
# Seek past the length of the header, plus the length of the
    # data we want to write.
# 8 is the number of bytes per value, i.e. abs(header['BITPIX'])/8
# (this example is assuming a 64-bit float)
# The -1 is to account for the final byte that we are about to
# write:
fobj.seek(len(header.tostring()) + (40000 * 40000 * 8) - 1)
fobj.write(b'\0')
##############################################################################
# More generally, this can be written:
shape = tuple(header[f'NAXIS{ii}'] for ii in range(1, header['NAXIS']+1))
with open('large.fits', 'rb+') as fobj:
    fobj.seek(len(header.tostring()) + (np.prod(shape) * np.abs(header['BITPIX']//8)) - 1)
fobj.write(b'\0')
##############################################################################
# On modern operating systems this will cause the file (past the header) to be
# filled with zeros out to the ~12GB needed to hold a 40000 x 40000 image. On
# filesystems that support sparse file creation (most Linux filesystems, but not
# the HFS+ filesystem used by most Macs) this is a very fast, efficient
# operation. On other systems your mileage may vary.
#
# This isn't the only way to build up a large file, but probably one of the
# safest. This method can also be used to create large multi-extension FITS
# files, with a little care.
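##############################################################################
# As an optional check (a sketch; the header is read lazily, so this is
# cheap), confirm that the file reports the intended dimensions without
# loading the data into memory:
with fits.open('large.fits') as hdul:
    print(hdul[0].shape)  # expected: (40000, 40000)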
##############################################################################
# Finally, we'll remove the file we created:
os.remove('large.fits')
|
3746afad4111a09da3c6cf3c5e6bf33e892ba956ce2410e6a7c461d407b486f2 | """
=====================================================================
Accessing data stored as a table in a multi-extension FITS (MEF) file
=====================================================================
FITS files can often contain large amounts of multi-dimensional data and
tables. This example opens a FITS file with information
from Chandra's HETG-S instrument.
The example uses `astropy.utils.data` to download a multi-extension FITS (MEF)
file, `astropy.io.fits` to investigate the header, and
`astropy.table.Table` to explore the data.
*By: Lia Corrales, Adrian Price-Whelan, and Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Use `astropy.utils.data` subpackage to download the FITS file used in this
# example. Also import `~astropy.table.Table` from the `astropy.table` subpackage
# and `astropy.io.fits`
from astropy.utils.data import get_pkg_data_filename
from astropy.table import Table
from astropy.io import fits
##############################################################################
# Download a FITS file
event_filename = get_pkg_data_filename('tutorials/FITS-tables/chandra_events.fits')
##############################################################################
# Display information about the contents of the FITS file.
fits.info(event_filename)
##############################################################################
# Extension 1, EVENTS, is a Table that contains information about each X-ray
# photon that hit Chandra's HETG-S detector.
#
# Use `~astropy.table.Table` to read the table
events = Table.read(event_filename, hdu=1)
##############################################################################
# Print the column names of the Events Table.
print(events.columns)
##############################################################################
# If a column contains unit information, it will have an associated
# `astropy.units` object.
print(events['energy'].unit)
##############################################################################
# Print the data stored in the Energy column.
print(events['energy'])
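##############################################################################
# As an optional visualization (a small sketch; assumes matplotlib is
# available), plot a histogram of the photon energies:
import matplotlib.pyplot as plt
plt.hist(events['energy'], bins=100)
plt.xlabel(f"Energy [{events['energy'].unit}]")
plt.ylabel('Number of photons')
plt.show()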
|
90b047a9970bc5e95f14844e634c0258de5d20a57e475710f0e7175c633ae8c4 | """
=====================================================
Convert a 3-color image (JPG) to separate FITS images
=====================================================
This example opens an RGB JPEG image and writes out each channel as a separate
FITS (image) file.
This example uses `pillow <https://python-pillow.org>`_ to read the image,
`matplotlib.pyplot` to display the image, and `astropy.io.fits` to save FITS files.
*By: Erik Bray, Adrian Price-Whelan*
*License: BSD*
"""
import numpy as np
from PIL import Image
from astropy.io import fits
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Load and display the original 3-color jpeg image:
image = Image.open('Hs-2009-14-a-web.jpg')
xsize, ysize = image.size
print(f"Image size: {ysize} x {xsize}")
print(f"Image bands: {image.getbands()}")
ax = plt.imshow(image)
##############################################################################
# Split the three channels (RGB) and get the data as Numpy arrays. The arrays
# are flattened, so they are 1-dimensional:
r, g, b = image.split()
r_data = np.array(r.getdata()) # data is now an array of length ysize*xsize
g_data = np.array(g.getdata())
b_data = np.array(b.getdata())
print(r_data.shape)
##############################################################################
# Reshape the image arrays to be 2-dimensional:
r_data = r_data.reshape(ysize, xsize) # data is now a matrix (ysize, xsize)
g_data = g_data.reshape(ysize, xsize)
b_data = b_data.reshape(ysize, xsize)
print(r_data.shape)
##############################################################################
# Write out the channels as separate FITS images.
# Add and visualize header info
red = fits.PrimaryHDU(data=r_data)
red.header['LATOBS'] = "32:11:56" # add spurious header info
red.header['LONGOBS'] = "110:56"
red.writeto('red.fits')
green = fits.PrimaryHDU(data=g_data)
green.header['LATOBS'] = "32:11:56"
green.header['LONGOBS'] = "110:56"
green.writeto('green.fits')
blue = fits.PrimaryHDU(data=b_data)
blue.header['LATOBS'] = "32:11:56"
blue.header['LONGOBS'] = "110:56"
blue.writeto('blue.fits')
from pprint import pprint
pprint(red.header)
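##############################################################################
# As an optional extra (a small sketch), display one of the channels in
# greyscale to verify that the reshaping preserved the image orientation:
plt.figure()
plt.imshow(r_data, cmap='gray')
plt.colorbar()
plt.show()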
##############################################################################
# Delete the files created
import os
os.remove('red.fits')
os.remove('green.fits')
os.remove('blue.fits')
|
612e9cb9f959326bcdaf05711ac31acd460f8cbc6822518b6aec8ee65ca628ff | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from numpy.core.multiarray import normalize_axis_index
from astropy.units import Quantity
from astropy.utils import isiterable
from astropy.utils.exceptions import AstropyUserWarning
from astropy.stats._fast_sigma_clip import _sigma_clip_fast
from astropy.stats.funcs import mad_std
from astropy.utils.compat.optional_deps import HAS_BOTTLENECK
if HAS_BOTTLENECK:
import bottleneck
__all__ = ['SigmaClip', 'sigma_clip', 'sigma_clipped_stats']
def _move_tuple_axes_first(array, axis):
"""
Bottleneck can only take integer axis, not tuple, so this function
takes all the axes to be operated on and combines them into the
first dimension of the array so that we can then use axis=0.
"""
# Figure out how many axes we are operating over
naxis = len(axis)
# Add remaining axes to the axis tuple
axis += tuple(i for i in range(array.ndim) if i not in axis)
# The new position of each axis is just in order
destination = tuple(range(array.ndim))
# Reorder the array so that the axes being operated on are at the
# beginning
array_new = np.moveaxis(array, axis, destination)
# Collapse the dimensions being operated on into a single dimension
# so that we can then use axis=0 with the bottleneck functions
array_new = array_new.reshape((-1,) + array_new.shape[naxis:])
return array_new
def _nanmean(array, axis=None):
    """Bottleneck nanmean function that handles a tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanmean(array, axis=axis))
else:
return bottleneck.nanmean(array, axis=axis)
def _nanmedian(array, axis=None):
    """Bottleneck nanmedian function that handles a tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanmedian(array, axis=axis))
else:
return bottleneck.nanmedian(array, axis=axis)
def _nanstd(array, axis=None, ddof=0):
    """Bottleneck nanstd function that handles a tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanstd(array, axis=axis,
ddof=ddof))
else:
return bottleneck.nanstd(array, axis=axis, ddof=ddof)
def _nanmadstd(array, axis=None):
"""mad_std function that ignores NaNs by default."""
return mad_std(array, axis=axis, ignore_nan=True)
class SigmaClip:
"""
Class to perform sigma clipping.
The data will be iterated over, each time rejecting values that are
less or more than a specified number of standard deviations from a
center value.
Clipped (rejected) pixels are those where::
data < center - (sigma_lower * std)
data > center + (sigma_upper * std)
where::
center = cenfunc(data [, axis=])
std = stdfunc(data [, axis=])
Invalid data values (i.e., NaN or inf) are automatically clipped.
For a functional interface to sigma clipping, see
:func:`sigma_clip`.
.. note::
`scipy.stats.sigmaclip` provides a subset of the functionality
in this class. Also, its input data cannot be a masked array
and it does not handle data that contains invalid values (i.e.,
NaN or inf). Also note that it uses the mean as the centering
function. The equivalent settings to `scipy.stats.sigmaclip`
are::
sigclip = SigmaClip(sigma=4., cenfunc='mean', maxiters=None)
sigclip(data, axis=None, masked=False, return_bounds=True)
Parameters
----------
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
See Also
--------
sigma_clip, sigma_clipped_stats
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
    specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=2, maxiters=5)
>>> filtered_data = sigclip(randvar)
This example clips all points that are more than 3 sigma relative
to the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and modifies the data in-place::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=3, maxiters=None, cenfunc='mean')
>>> filtered_data = sigclip(randvar, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> sigclip = SigmaClip(sigma=2.3)
>>> filtered_data = sigclip(data, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
def __init__(self, sigma=3., sigma_lower=None, sigma_upper=None,
maxiters=5, cenfunc='median', stdfunc='std', grow=False):
self.sigma = sigma
self.sigma_lower = sigma_lower or sigma
self.sigma_upper = sigma_upper or sigma
self.maxiters = maxiters or np.inf
self.cenfunc = cenfunc
self.stdfunc = stdfunc
self._cenfunc_parsed = self._parse_cenfunc(cenfunc)
self._stdfunc_parsed = self._parse_stdfunc(stdfunc)
self._min_value = np.nan
self._max_value = np.nan
self._niterations = 0
self.grow = grow
# This just checks that SciPy is available, to avoid failing
# later than necessary if __call__ needs it:
if self.grow:
from scipy.ndimage import binary_dilation
self._binary_dilation = binary_dilation
def __repr__(self):
return ('SigmaClip(sigma={}, sigma_lower={}, sigma_upper={}, '
'maxiters={}, cenfunc={}, stdfunc={}, grow={})'
.format(self.sigma, self.sigma_lower, self.sigma_upper,
self.maxiters, repr(self.cenfunc), repr(self.stdfunc),
self.grow))
def __str__(self):
lines = ['<' + self.__class__.__name__ + '>']
attrs = ['sigma', 'sigma_lower', 'sigma_upper', 'maxiters', 'cenfunc',
'stdfunc', 'grow']
for attr in attrs:
lines.append(f' {attr}: {repr(getattr(self, attr))}')
return '\n'.join(lines)
@staticmethod
def _parse_cenfunc(cenfunc):
if isinstance(cenfunc, str):
if cenfunc == 'median':
if HAS_BOTTLENECK:
cenfunc = _nanmedian
else:
cenfunc = np.nanmedian # pragma: no cover
elif cenfunc == 'mean':
if HAS_BOTTLENECK:
cenfunc = _nanmean
else:
cenfunc = np.nanmean # pragma: no cover
else:
raise ValueError(f'{cenfunc} is an invalid cenfunc.')
return cenfunc
@staticmethod
def _parse_stdfunc(stdfunc):
if isinstance(stdfunc, str):
if stdfunc == 'std':
if HAS_BOTTLENECK:
stdfunc = _nanstd
else:
stdfunc = np.nanstd # pragma: no cover
elif stdfunc == 'mad_std':
stdfunc = _nanmadstd
else:
raise ValueError(f'{stdfunc} is an invalid stdfunc.')
return stdfunc
def _compute_bounds(self, data, axis=None):
# ignore RuntimeWarning if the array (or along an axis) has only
# NaNs
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
self._max_value = self._cenfunc_parsed(data, axis=axis)
std = self._stdfunc_parsed(data, axis=axis)
self._min_value = self._max_value - (std * self.sigma_lower)
self._max_value += std * self.sigma_upper
def _sigmaclip_fast(self, data, axis=None,
masked=True, return_bounds=False,
copy=True):
"""
Fast C implementation for simple use cases.
"""
if isinstance(data, Quantity):
data, unit = data.value, data.unit
else:
unit = None
if copy is False and masked is False and data.dtype.kind != 'f':
raise Exception("cannot mask non-floating-point array with NaN "
"values, set copy=True or masked=True to avoid "
"this.")
if axis is None:
axis = -1 if data.ndim == 1 else tuple(range(data.ndim))
if not isiterable(axis):
axis = normalize_axis_index(axis, data.ndim)
data_reshaped = data
transposed_shape = None
else:
# The gufunc implementation does not handle non-scalar axis
# so we combine the dimensions together as the last
# dimension and set axis=-1
axis = tuple(normalize_axis_index(ax, data.ndim) for ax in axis)
transposed_axes = tuple(ax for ax in range(data.ndim)
if ax not in axis) + axis
data_transposed = data.transpose(transposed_axes)
transposed_shape = data_transposed.shape
data_reshaped = data_transposed.reshape(
transposed_shape[:data.ndim - len(axis)] + (-1,))
axis = -1
if data_reshaped.dtype.kind != 'f' or data_reshaped.dtype.itemsize > 8:
data_reshaped = data_reshaped.astype(float)
mask = ~np.isfinite(data_reshaped)
if np.any(mask):
warnings.warn('Input data contains invalid values (NaNs or '
'infs), which were automatically clipped.',
AstropyUserWarning)
if isinstance(data_reshaped, np.ma.MaskedArray):
mask |= data_reshaped.mask
data = data.view(np.ndarray)
data_reshaped = data_reshaped.view(np.ndarray)
mask = np.broadcast_to(mask, data_reshaped.shape).copy()
bound_lo, bound_hi = _sigma_clip_fast(
data_reshaped, mask, self.cenfunc == 'median',
self.stdfunc == 'mad_std',
-1 if np.isinf(self.maxiters) else self.maxiters,
self.sigma_lower, self.sigma_upper, axis=axis)
with np.errstate(invalid='ignore'):
mask |= data_reshaped < np.expand_dims(bound_lo, axis)
mask |= data_reshaped > np.expand_dims(bound_hi, axis)
if transposed_shape is not None:
# Get mask in shape of data.
mask = mask.reshape(transposed_shape)
mask = mask.transpose(tuple(transposed_axes.index(ax)
for ax in range(data.ndim)))
if masked:
result = np.ma.array(data, mask=mask, copy=copy)
else:
if copy:
result = data.astype(float, copy=True)
else:
result = data
result[mask] = np.nan
if unit is not None:
result = result << unit
bound_lo = bound_lo << unit
bound_hi = bound_hi << unit
if return_bounds:
return result, bound_lo, bound_hi
else:
return result
def _sigmaclip_noaxis(self, data, masked=True, return_bounds=False,
copy=True):
"""
Sigma clip when ``axis`` is None and ``grow`` is not >0.
In this simple case, we remove clipped elements from the
flattened array during each iteration.
"""
filtered_data = data.ravel()
# remove masked values and convert to ndarray
if isinstance(filtered_data, np.ma.MaskedArray):
filtered_data = filtered_data.data[~filtered_data.mask]
# remove invalid values
good_mask = np.isfinite(filtered_data)
if np.any(~good_mask):
filtered_data = filtered_data[good_mask]
warnings.warn('Input data contains invalid values (NaNs or '
'infs), which were automatically clipped.',
AstropyUserWarning)
nchanged = 1
iteration = 0
while nchanged != 0 and (iteration < self.maxiters):
iteration += 1
size = filtered_data.size
self._compute_bounds(filtered_data, axis=None)
filtered_data = filtered_data[
(filtered_data >= self._min_value)
& (filtered_data <= self._max_value)]
nchanged = size - filtered_data.size
self._niterations = iteration
if masked:
# return a masked array and optional bounds
filtered_data = np.ma.masked_invalid(data, copy=copy)
# update the mask in place, ignoring RuntimeWarnings for
# comparisons with NaN data values
with np.errstate(invalid='ignore'):
filtered_data.mask |= np.logical_or(data < self._min_value,
data > self._max_value)
if return_bounds:
return filtered_data, self._min_value, self._max_value
else:
return filtered_data
def _sigmaclip_withaxis(self, data, axis=None, masked=True,
return_bounds=False, copy=True):
"""
Sigma clip the data when ``axis`` or ``grow`` is specified.
In this case, we replace clipped values with NaNs as placeholder
values.
"""
# float array type is needed to insert nans into the array
filtered_data = data.astype(float) # also makes a copy
# remove invalid values
bad_mask = ~np.isfinite(filtered_data)
if np.any(bad_mask):
filtered_data[bad_mask] = np.nan
warnings.warn('Input data contains invalid values (NaNs or '
'infs), which were automatically clipped.',
AstropyUserWarning)
# remove masked values and convert to plain ndarray
if isinstance(filtered_data, np.ma.MaskedArray):
filtered_data = np.ma.masked_invalid(filtered_data).astype(float)
filtered_data = filtered_data.filled(np.nan)
if axis is not None:
# convert negative axis/axes
if not isiterable(axis):
axis = (axis,)
axis = tuple(filtered_data.ndim + n if n < 0 else n for n in axis)
# define the shape of min/max arrays so that they can be broadcast
# with the data
mshape = tuple(1 if dim in axis else size
for dim, size in enumerate(filtered_data.shape))
if self.grow:
# Construct a growth kernel from the specified radius in
# pixels (consider caching this for re-use by subsequent
# calls?):
cenidx = int(self.grow)
size = 2 * cenidx + 1
indices = np.mgrid[(slice(0, size),) * data.ndim]
if axis is not None:
for n, dim in enumerate(indices):
# For any axes that we're not clipping over, set
# their indices outside the growth radius, so masked
# points won't "grow" in that dimension:
if n not in axis:
dim[dim != cenidx] = size
kernel = (sum((idx - cenidx)**2 for idx in indices)
<= self.grow**2)
del indices
nchanged = 1
iteration = 0
while nchanged != 0 and (iteration < self.maxiters):
iteration += 1
self._compute_bounds(filtered_data, axis=axis)
if not np.isscalar(self._min_value):
self._min_value = self._min_value.reshape(mshape)
self._max_value = self._max_value.reshape(mshape)
with np.errstate(invalid='ignore'):
# Since these comparisons are always False for NaNs, the
# resulting mask contains only newly-rejected pixels and
# we can dilate it without growing masked pixels more
# than once.
new_mask = ((filtered_data < self._min_value)
| (filtered_data > self._max_value))
if self.grow:
new_mask = self._binary_dilation(new_mask, kernel)
filtered_data[new_mask] = np.nan
nchanged = np.count_nonzero(new_mask)
del new_mask
self._niterations = iteration
if masked:
# create an output masked array
if copy:
filtered_data = np.ma.MaskedArray(data,
~np.isfinite(filtered_data),
copy=True)
else:
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid='ignore'):
out = np.ma.masked_invalid(data, copy=False)
filtered_data = np.ma.masked_where(np.logical_or(
out < self._min_value, out > self._max_value),
out, copy=False)
if return_bounds:
return filtered_data, self._min_value, self._max_value
else:
return filtered_data
def __call__(self, data, axis=None, masked=True, return_bounds=False,
copy=True):
"""
Perform sigma clipping on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If
`None`, then the flattened data will be used. ``axis`` is
passed to the ``cenfunc`` and ``stdfunc``. The default is
`None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where
the mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` is returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are
also returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If
`False` and ``masked=True``, then the returned masked array
data will contain the same array as the input ``data`` (if
``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`).
If `False` and ``masked=False``, the input data is modified
in-place. The default is `True`.
Returns
-------
result : array-like
If ``masked=True``, then a `~numpy.ma.MaskedArray` is
returned, where the mask is `True` for clipped values and
where the input mask was `True`.
If ``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the masked
array or array above, the minimum and maximum clipping
bounds are returned.
If ``masked=False`` and ``axis=None``, then the output
array is a flattened 1D `~numpy.ndarray` where the clipped
values have been removed. If ``return_bounds=True`` then the
returned minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the
output `~numpy.ndarray` will have the same shape as the
input ``data`` and contain ``np.nan`` where values were
clipped. If the input ``data`` was a masked array, then the
output `~numpy.ndarray` will also contain ``np.nan`` where
the input mask was `True`. If ``return_bounds=True`` then
the returned minimum and maximum clipping thresholds will be
            `~numpy.ndarray`\\s.
"""
data = np.asanyarray(data)
if data.size == 0:
if masked:
result = np.ma.MaskedArray(data)
else:
result = data
if return_bounds:
return result, self._min_value, self._max_value
else:
return result
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
if masked:
result = data
else:
result = np.full(data.shape, np.nan)
if return_bounds:
return result, self._min_value, self._max_value
else:
return result
# Shortcut for common cases where a fast C implementation can be
# used.
if (self.cenfunc in ('mean', 'median')
and self.stdfunc in ('std', 'mad_std')
and axis is not None and not self.grow):
return self._sigmaclip_fast(data, axis=axis, masked=masked,
return_bounds=return_bounds,
copy=copy)
# These two cases are treated separately because when
# ``axis=None`` we can simply remove clipped values from the
# array. This is not possible when ``axis`` or ``grow`` is
# specified.
if axis is None and not self.grow:
return self._sigmaclip_noaxis(data, masked=masked,
return_bounds=return_bounds,
copy=copy)
else:
return self._sigmaclip_withaxis(data, axis=axis, masked=masked,
return_bounds=return_bounds,
copy=copy)
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, maxiters=5,
cenfunc='median', stdfunc='std', axis=None, masked=True,
return_bounds=False, copy=True, grow=False):
"""
Perform sigma-clipping on the provided data.
The data will be iterated over, each time rejecting values that are
less or more than a specified number of standard deviations from a
center value.
Clipped (rejected) pixels are those where::
data < center - (sigma_lower * std)
data > center + (sigma_upper * std)
where::
center = cenfunc(data [, axis=])
std = stdfunc(data [, axis=])
Invalid data values (i.e., NaN or inf) are automatically clipped.
For an object-oriented interface to sigma clipping, see
:class:`SigmaClip`.
.. note::
`scipy.stats.sigmaclip` provides a subset of the functionality
in this class. Also, its input data cannot be a masked array
and it does not handle data that contains invalid values (i.e.,
NaN or inf). Also note that it uses the mean as the centering
function. The equivalent settings to `scipy.stats.sigmaclip`
are::
            sigma_clip(sigma=4., cenfunc='mean', maxiters=None, axis=None,
                       masked=False, return_bounds=True)
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where
the mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` and the minimum and maximum clipping thresholds
are returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are also
returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If `False`
and ``masked=True``, then the returned masked array data will
contain the same array as the input ``data`` (if ``data`` is a
`~numpy.ndarray` or `~numpy.ma.MaskedArray`). If `False` and
``masked=False``, the input data is modified in-place. The
default is `True`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Returns
-------
result : array-like
If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned,
where the mask is `True` for clipped values and where the input
mask was `True`.
If ``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the masked array
or array above, the minimum and maximum clipping bounds are
returned.
If ``masked=False`` and ``axis=None``, then the output array
is a flattened 1D `~numpy.ndarray` where the clipped values
have been removed. If ``return_bounds=True`` then the returned
minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the output
`~numpy.ndarray` will have the same shape as the input ``data``
and contain ``np.nan`` where values were clipped. If the input
``data`` was a masked array, then the output `~numpy.ndarray`
will also contain ``np.nan`` where the input mask was `True`.
If ``return_bounds=True`` then the returned minimum and maximum
        clipping thresholds will be `~numpy.ndarray`\\s.
See Also
--------
SigmaClip, sigma_clipped_stats
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
    specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5)
This example clips all points that are more than 3 sigma relative
to the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and does not copy the data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
... cenfunc=mean, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, maxiters=maxiters,
cenfunc=cenfunc, stdfunc=stdfunc, grow=grow)
return sigclip(data, axis=axis, masked=masked,
return_bounds=return_bounds, copy=copy)
def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0,
sigma_lower=None, sigma_upper=None, maxiters=5,
cenfunc='median', stdfunc='std', std_ddof=0,
axis=None, grow=False):
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower
and upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is 3.
sigma_lower : float or None, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or None, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or None, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute
the center value for the clipping. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanmean`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'median'``.
stdfunc : {'std', 'mad_std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If using a callable
function/object and the ``axis`` keyword is used, then it must
be able to ignore NaNs (e.g., `numpy.nanstd`) and it must have
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : None or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those
that fall outwith the clipping limits (only applied along
``axis``, if specified). As an example, for a 2D image a value
of 1 will mask the nearest pixels in a cross pattern around each
deviant pixel, while 1.5 will also reject the nearest diagonal
neighbours and so on.
Notes
-----
The best performance will typically be obtained by setting
``cenfunc`` and ``stdfunc`` to one of the built-in functions
specified as a string. If one of the options is set to a string
while the other has a custom callable, you may in some cases see
better performance if you have the `bottleneck`_ package installed.
.. _bottleneck: https://github.com/pydata/bottleneck
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
return np.ma.masked, np.ma.masked, np.ma.masked
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, maxiters=maxiters,
cenfunc=cenfunc, stdfunc=stdfunc, grow=grow)
data_clipped = sigclip(data, axis=axis, masked=False, return_bounds=False,
copy=True)
if HAS_BOTTLENECK:
mean = _nanmean(data_clipped, axis=axis)
median = _nanmedian(data_clipped, axis=axis)
std = _nanstd(data_clipped, ddof=std_ddof, axis=axis)
else: # pragma: no cover
mean = np.nanmean(data_clipped, axis=axis)
median = np.nanmedian(data_clipped, axis=axis)
std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis)
return mean, median, std
|
37dbbb5faecb79ccc968b7f2681a4afc39e7330456ec836f199554cc2842c8fc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import numpy as np
import astropy.units as u
from . import _stats
__all__ = ['gaussian_fwhm_to_sigma', 'gaussian_sigma_to_fwhm',
'binom_conf_interval', 'binned_binom_proportion',
'poisson_conf_interval', 'median_absolute_deviation', 'mad_std',
'signal_to_noise_oir_ccd', 'bootstrap', 'kuiper', 'kuiper_two',
'kuiper_false_positive_probability', 'cdf_from_intervals',
'interval_overlap_length', 'histogram_intervals', 'fold_intervals']
__doctest_skip__ = ['binned_binom_proportion']
__doctest_requires__ = {'binom_conf_interval': ['scipy'],
'poisson_conf_interval': ['scipy']}
gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1. / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
"""
# NUMPY_LT_1_18
def _expand_dims(data, axis):
"""
Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the
expanded array shape.
This function allows for tuple axis arguments.
``numpy.expand_dims`` currently does not allow that, but it will in
numpy v1.18 (https://github.com/numpy/numpy/pull/14051).
``_expand_dims`` can be replaced with ``numpy.expand_dims`` when the
minimum supported numpy version is v1.18.
Parameters
----------
data : array-like
Input array.
axis : int or tuple of int
Position in the expanded axes where the new axis (or axes) is
placed. A tuple of axes is now supported. Out-of-range axes are
forbidden and raise an `AxisError`.
Returns
-------
result : ndarray
View of ``data`` with the number of dimensions increased.
"""
if isinstance(data, np.matrix):
data = np.asarray(data)
else:
data = np.asanyarray(data)
if not isinstance(axis, (tuple, list)):
axis = (axis,)
out_ndim = len(axis) + data.ndim
axis = np.core.numeric.normalize_axis_tuple(axis, out_ndim)
shape_it = iter(data.shape)
shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
return data.reshape(shape)
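# A minimal illustrative sketch (not part of the original module) of the
# tuple-axis behaviour documented above; the array contents are hypothetical.
def _example_expand_dims():  # pragma: no cover - illustrative only
    a = np.arange(6).reshape(2, 3)
    # Insert length-1 axes at positions 0 and 2 of the output shape.
    b = _expand_dims(a, (0, 2))
    assert b.shape == (1, 2, 1, 3)
    return b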
def binom_conf_interval(k, n, confidence_level=0.68269, interval='wilson'):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
confidence_level : float, optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
Confidence level must be in range [0, 1].
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (n) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/n)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2}
\pm \frac{\kappa n^{1/2}}{n + \kappa^2}
((\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n))^{1/2}
where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, n - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = n the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, n - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}}
The Wald interval gives acceptable results in some limiting
cases. Particularly, when n is very large, and the true proportion
:math:`\epsilon` is not "too close" to 0 or 1. However, as the
latter is not verifiable when trying to estimate :math:`\epsilon`,
this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
This function requires ``scipy`` for all interval types.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP
array([0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, n:
>>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP
array([[0.07921741, 0.21597328],
[0.42078276, 0.61736012]])
>>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP
array([[0.0842525 , 0.21789949],
[0.42218001, 0.61753691]])
>>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP
array([[0.12139799, 0.24309021],
[0.45401727, 0.61535699]])
In contrast, the Wald interval gives poor results for small k, n.
For k = 0 or k = n, the interval always has zero length.
>>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP
array([[0.02111437, 0.18091075],
[0.37888563, 0.61908925]])
For confidence intervals approaching 1, the Wald interval for
0 < k < n can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP
array([[-0.26077835, -0.16433593],
[ 0.66077835, 0.96433593]])
""" # noqa
if confidence_level < 0. or confidence_level > 1.:
raise ValueError('confidence_level must be between 0. and 1.')
alpha = 1. - confidence_level
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError('n must be positive')
if (k < 0).any() or (k > n).any():
raise ValueError('k must be in {0, 1, .., n}')
if interval == 'wilson' or interval == 'wald':
from scipy.special import erfinv
kappa = np.sqrt(2.) * min(erfinv(confidence_level), 1.e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == 'wilson':
midpoint = (k + kappa ** 2 / 2.) / (n + kappa ** 2)
halflength = (kappa * np.sqrt(n)) / (n + kappa ** 2) * \
np.sqrt(p * (1 - p) + kappa ** 2 / (4 * n))
conf_interval = np.array([midpoint - halflength,
midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.] = 0.
conf_interval[conf_interval > 1.] = 1.
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1. - p) / n)
conf_interval = np.array([midpoint - halflength,
midpoint + halflength])
elif interval == 'jeffreys' or interval == 'flat':
from scipy.special import betaincinv
if interval == 'jeffreys':
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1. - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1. - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.
elif k == n:
upperbound = 1.
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError(f'Unrecognized interval: {interval:s}')
return conf_interval
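# A minimal illustrative sketch (not part of the original module) that
# re-derives the Wilson bounds from the formula quoted in the Notes and checks
# them against ``binom_conf_interval``. It assumes scipy is available, as the
# function itself does for this interval; the counts are hypothetical.
def _example_wilson_interval_by_hand():  # pragma: no cover - illustrative only
    from scipy.special import erfinv
    k, n, confidence_level = 4, 5, 0.68269
    kappa = np.sqrt(2.) * erfinv(confidence_level)
    p = k / n
    midpoint = (k + kappa ** 2 / 2.) / (n + kappa ** 2)
    halflength = ((kappa * np.sqrt(n)) / (n + kappa ** 2)
                  * np.sqrt(p * (1 - p) + kappa ** 2 / (4 * n)))
    by_hand = np.array([midpoint - halflength, midpoint + halflength])
    assert np.allclose(by_hand, binom_conf_interval(k, n, interval='wilson'))
    return by_hand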
def binned_binom_proportion(x, success, bins=10, range=None,
confidence_level=0.68269, interval='wilson'):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : sequence
Values.
success : sequence of bool
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalar, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
confidence_level : float, optional
Must be in range [0, 1].
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
``bin_ctr + bin_halfwidth`` give the left and right side of each bin,
respectively.
p : ndarray
Efficiency in each bin.
perr : ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
Notes
-----
This function requires ``scipy`` for all interval types.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError('sizes of x and success must match')
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(k, n, confidence_level=confidence_level, interval=interval)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
def _check_poisson_conf_inputs(sigma, background, confidence_level, name):
if sigma != 1:
raise ValueError(f"Only sigma=1 supported for interval {name}")
if background != 0:
raise ValueError(f"background not supported for interval {name}")
if confidence_level is not None:
raise ValueError(f"confidence_level not supported for interval {name}")
def poisson_conf_interval(n, interval='root-n', sigma=1, background=0,
confidence_level=None):
r"""Poisson parameter confidence interval given observed counts
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
confidence_level : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group `recommends
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also discusses several
possibilities but concludes that no single representation is
suitable for all cases. The suggestion has also been `floated
<https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error
bars should be attached to theoretical predictions instead of
observed data, which this function will not help with (but it's
easy; then you really should use the square root of the theoretical
prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as `explained
<https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by
the CDF working group). It also has the nice feature that if your
theory curve touches an endpoint of the interval, then your data
point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The `documentation
<https://cxc.harvard.edu/sherpa4.4/statistics/#chigehrels>`_ claims
it is based on a numerical approximation published in `Gehrels
(1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it
does not actually appear there. It is symmetrical, and while the
upper limits are within about 1% of those given by
'frequentist-confidence', the lower limits can be badly wrong. The
interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
`Maxwell (2011)
<https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further
details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See `Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further
details.
These formulas implement a positive, uniform prior.
`Kraft, Burrows, and Nousek (1991)
<https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this
choice in more detail and show that the problem is relatively
insensitive to the choice of prior.
This function has an optional dependency: Either `Scipy
<https://www.scipy.org/>`_ or `mpmath <http://mpmath.org/>`_ need
to be available (Scipy works only for N < 100).
This code is numerically intensive, which makes it much slower than
the other methods, in particular for large count numbers (above 1000
even with ``mpmath``). Fortunately, some of the other methods or a
Gaussian approximation usually work well in this regime.
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(
... np.arange(10), interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(
... 7, interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(
... 10, background=1.5, confidence_level=0.95,
... interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]])
""" # noqa
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == 'root-n':
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n),
n + np.sqrt(n)])
elif interval == 'root-n-0':
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - np.sqrt(n),
n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == 'pearson':
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n + 0.5 - np.sqrt(n + 0.25),
n + 0.5 + np.sqrt(n + 0.25)])
elif interval == 'sherpagehrels':
_check_poisson_conf_inputs(sigma, background, confidence_level, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75),
n + 1 + np.sqrt(n + 0.75)])
elif interval == 'frequentist-confidence':
_check_poisson_conf_inputs(1., background, confidence_level, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array([0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha)])
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == 'kraft-burrows-nousek':
# Deprecation warning in Python 3.9 when N is float, so we force int,
# see https://github.com/astropy/astropy/issues/10832
if np.isscalar(n):
if not isinstance(n, int):
raise TypeError('Number of counts must be integer.')
elif not issubclass(n.dtype.type, np.integer):
raise TypeError('Number of counts must be integer.')
if confidence_level is None:
raise ValueError('Set confidence_level for method {}. (sigma is '
'ignored.)'.format(interval))
confidence_level = np.asanyarray(confidence_level)
if np.any(confidence_level <= 0) or np.any(confidence_level >= 1):
raise ValueError('confidence_level must be a number between 0 and 1.')
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError('Background must be >= 0.')
conf_interval = np.vectorize(_kraft_burrows_nousek,
cache=True)(n, background, confidence_level)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}")
return conf_interval
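# A minimal illustrative sketch (not part of the original module) checking the
# closed-form 'pearson' interval from the Notes,
# CI = (n + 0.5 - sqrt(n + 0.25), n + 0.5 + sqrt(n + 0.25)), for a
# hypothetical count.
def _example_pearson_interval_by_hand():  # pragma: no cover - illustrative only
    n = 7
    lower = n + 0.5 - np.sqrt(n + 0.25)
    upper = n + 0.5 + np.sqrt(n + 0.25)
    assert np.allclose([lower, upper],
                       poisson_conf_interval(n, interval='pearson'))
    return lower, upper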
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(a - median(a)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the MADs are computed. The default
(`None`) is to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.default_rng(12345)
>>> from numpy.random import randn
>>> mad = median_absolute_deviation(rand.standard_normal(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.6829504282771885
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 why using np.ma.median
# for normal arrays should not be done (summary: np.ma.median always
# returns a masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_where(np.isnan(data), data, copy=True)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median # drops units if result is NaN
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (isinstance(data, u.Quantity) and func is np.median
and data_median.ndim == 0 and np.isnan(data_median)):
data_median = data.__array_wrap__(data_median)
# broadcast the median array before subtraction
if axis is not None:
data_median = _expand_dims(data_median, axis=axis) # NUMPY_LT_1_18
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
# this conditional can be removed after this PR is merged:
# https://github.com/astropy/astropy/issues/12165
if (isinstance(data, u.Quantity) and func is np.median
and result.ndim == 0 and np.isnan(result)):
result = data.__array_wrap__(result)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : None, int, or tuple of int, optional
The axis or axes along which the robust standard deviations are
computed. The default (`None`) is to compute the robust
standard deviation of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.default_rng(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
1.984147963351707
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(
data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
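# A minimal illustrative sketch (not part of the original module) contrasting
# ``mad_std`` with ``numpy.std`` on hypothetical data containing gross
# outliers: the MAD-based estimate stays near the true sigma while the plain
# standard deviation is inflated by orders of magnitude.
def _example_mad_std_robustness():  # pragma: no cover - illustrative only
    rng = np.random.default_rng(12345)
    data = rng.normal(0., 2., 10000)
    data[:100] = 1e4  # hypothetical gross outliers (1% of the sample)
    robust = mad_std(data)   # ~2
    naive = np.std(data)     # ~1000
    assert abs(robust - 2.) < 0.2 and naive > 100.
    return robust, naive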
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix,
gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
"""
signal = t * source_eps * gain
noise = np.sqrt(t * (source_eps * gain + npix *
(sky_eps * gain + dark_eps)) + npix * rd ** 2)
return signal / noise
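# A minimal illustrative sketch (not part of the original module) of a CCD
# signal-to-noise estimate; all instrument numbers below are hypothetical
# (rates in electrons per second, read noise in electrons).
def _example_ccd_snr():  # pragma: no cover - illustrative only
    snr = signal_to_noise_oir_ccd(t=1000.,         # exposure time [s]
                                  source_eps=20.,  # source rate [e-/s]
                                  sky_eps=1.,      # sky rate [e-/s/pixel]
                                  dark_eps=0.01,   # dark current [e-/s/pixel]
                                  rd=5.,           # read noise [e-]
                                  npix=25.)        # aperture area [pixel]
    # signal = 2.0e4 e-; noise = sqrt(2.0e4 + 1000*25*1.01 + 25*5**2) ~ 214 e-,
    # so the returned SNR is ~93.
    return snr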
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
def _scipy_kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires :mod:`~scipy`. This implementation will cause Overflow Errors for
about N > 100 (the exact limit depends on details of how scipy was
compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an
implementation that is slower, but can deal with arbitrarily high numbers
since it is based on the `mpmath <http://mpmath.org/>`_ library.
'''
from scipy.optimize import brentq
from scipy.integrate import quad
from scipy.special import factorial
from math import exp
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
return 1. / (exp(-B) * np.sum(np.power(B, n) / factorial(n)))
# The parameters of eqn8 do not vary between calls so we can calculate the
# result once and reuse it. The same is true for the factorial of N.
# eqn7 is called hundreds of times, so "caching" these values yields a
# significant speedup (roughly a factor of 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
'''
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching s_min that satisfies eqn7(S_max) =
eqn7(S_min).
'''
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <http://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
'''
from mpmath import mpf, factorial, findroot, fsum, power, exp, quad
# We convert these values to float. Because for some reason,
# mpmath.mpf cannot convert from numpy.int64
N = mpf(float(N))
B = mpf(float(B))
CL = mpf(float(CL))
tol = 1e-4
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1. / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
'''
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching s_min that satisfies eqn7(S_max) =
eqn7(S_min).
'''
y_S_max = eqn7(S_max, N, B)
# If B > N, then N-B, the "most probable" value, is < 0
# and thus s_min is certainly 0.
# Note: For small N, s_max is also close to 0 and root finding
# might find the wrong root, thus it is important to handle this
# case here and return the analytical answer (s_min = 0).
if (B >= N) or (eqn7(0, N, B) >= y_S_max):
return 0.
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, [0., N - B], solver='ridder',
tol=tol)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
# Several numerical problems were found to prevent the solvers from finding
# the roots unless the starting values are very close to the final values.
# Thus, this primitive, time-wasting, brute-force stepping here to get
# an interval that can be fed into the ridder solver.
s_max_guess = max(N - B, 1.)
while func(s_max_guess) < 0:
s_max_guess += 1
S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver='ridder',
tol=tol)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int or np.int32/np.int64
Total observed count number
B : float or np.float32/np.float64
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float or np.float32/np.float64
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
This function has an optional dependency: Either :mod:`scipy` or `mpmath
<http://mpmath.org/>`_ need to be available. (Scipy only works for
N < 100).
'''
from astropy.utils.compat.optional_deps import HAS_SCIPY, HAS_MPMATH
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError('Need mpmath package for input numbers this '
'large.')
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError('Either scipy or mpmath are required.')
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.240, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
"""
try:
from scipy.special import factorial, comb
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import factorial, comb
if D < 0. or D > 2.:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2. / N:
return 1. - factorial(N) * (D - 1. / N)**(N - 1)
elif D < 3. / N:
k = -(N * D - 1.) / 2.
r = np.sqrt(k**2 - (N * D - 2.)**2 / 2.)
a, b = -k + r, -k - r
return 1 - (factorial(N - 1) * (b**(N - 1) * (1 - a) - a**(N - 1) * (1 - b))
/ N**(N - 2) / (b - a))
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.) / (2. * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y**(t - 3) * (y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2)
term1 = comb(N, t)
term2 = (1 - D - t / N)**(N - t - 1)
# term1 is formally finite, but is approximated by numpy as np.inf for
# large values, so we set them to zero manually when they would be
# multiplied by zero anyway
term1[(term1 == np.inf) & (term2 == 0)] = 0.
final_term = Tt * term1 * term2
return final_term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)).sum()
return S1 - 8 * D / 3 * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = (np.amax(cdfv - np.arange(N) / float(N)) +
np.amax((np.arange(N) + 1) / float(N) - cdfv))
return D, kuiper_false_positive_probability(D, N)
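# A minimal illustrative sketch (not part of the original module) of the
# one-sample Kuiper test against its default uniform CDF. It assumes scipy is
# available (needed by ``kuiper_false_positive_probability``); the samples are
# hypothetical.
def _example_kuiper_uniform():  # pragma: no cover - illustrative only
    rng = np.random.default_rng(12345)
    d_uni, fpp_uni = kuiper(rng.uniform(0., 1., 500))
    # Cubing the draws makes them strongly non-uniform, so the false positive
    # probability collapses towards zero.
    d_skew, fpp_skew = kuiper(rng.uniform(0., 1., 500) ** 3)
    assert fpp_skew < fpp_uni
    return (d_uni, fpp_uni), (d_skew, fpp_skew)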
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1, = data1.shape
n2, = data2.shape
common_type = np.find_common_type([], [data1.dtype, data2.dtype])
if not (np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)):
raise ValueError('kuiper_two only accepts real inputs')
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError('kuiper_two only accepts non-nan inputs')
D = _stats.ks_2samp(np.asarray(data1, common_type),
np.asarray(data2, common_type))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of (3,) tuple
For each tuple (ai,bi,wi); ai and bi are the limits of the interval,
and wi is the weight to apply to the interval.
Returns
-------
breaks : (N,) array of float
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : (N-1,) array of float
The ith element is the sum, over all input intervals, of the weight of
each interval times the number of times it covers the piece
(breaks[i], breaks[i+1]).
"""
r = []
breaks = set()
tot = 0
for (a, b, wt) in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.)
breaks.add(1.)
breaks = sorted(breaks)
breaks_map = {f: i for (i, f) in enumerate(breaks)}
totals = np.zeros(len(breaks) - 1)
totals += tot
for (a, b, wt) in r:
totals[breaks_map[a]:breaks_map[b]] += wt
return np.array(breaks), totals
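# A minimal illustrative sketch (not part of the original module) of
# ``fold_intervals`` with two hypothetical observing blocks expressed in units
# of the orbital period; the second block wraps across phase 0.
def _example_fold_intervals():  # pragma: no cover - illustrative only
    breaks, weights = fold_intervals([(0.2, 0.7, 1.),
                                      (0.9, 1.3, 2.)])
    # Resulting pieces and summed weights: [0, 0.2) -> 2, [0.2, 0.3) -> 3,
    # [0.3, 0.7) -> 1, [0.7, 0.9) -> 0, [0.9, 1.0) -> 2.
    assert np.allclose(weights, [2., 3., 1., 0., 2.])
    return breaks, weights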
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : (N,) array of float
The boundaries of successive intervals.
totals : (N-1,) array of float
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by breaks, weights
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError(
"Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
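# A minimal illustrative sketch (not part of the original module) chaining
# ``fold_intervals`` and ``cdf_from_intervals`` into the kind of callable CDF
# that ``kuiper`` accepts; the single observing block below is hypothetical.
def _example_cdf_from_intervals():  # pragma: no cover - illustrative only
    breaks, weights = fold_intervals([(0., 0.5, 1.)])
    cdf = cdf_from_intervals(breaks, weights)
    # All of the weight lies in [0, 0.5], so the CDF rises linearly there and
    # saturates at 1 beyond phase 0.5.
    assert np.allclose([cdf(0.25), cdf(0.5), cdf(0.75)], [0.5, 1.0, 1.0])
    return cdf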
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : (float, float)
The two intervals, (interval 1, interval 2).
Returns
-------
l : float
The length of the overlap between the two intervals.
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
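# A minimal illustrative sketch (not part of the original module) covering the
# three overlap cases handled above, with hypothetical endpoints.
def _example_interval_overlap_length():  # pragma: no cover - illustrative only
    assert interval_overlap_length((0., 2.), (1., 3.)) == 1.  # partial overlap
    assert interval_overlap_length((0., 1.), (2., 3.)) == 0.  # disjoint
    assert interval_overlap_length((0., 4.), (1., 2.)) == 1.  # containment
    return True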
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : (N,) array of float
Endpoints of the intervals in the PDF
totals : (N-1,) array of float
Probability densities in each bin
Returns
-------
h : array of float
The average weight for each bin
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n,
float(j + 1) / n), (start, end))
h[j] += ol / (1. / n) * totals[i]
start = end
return h
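# A minimal illustrative sketch (not part of the original module) of
# ``histogram_intervals`` averaging a hypothetical piecewise-constant weight
# function into two equal-width bins.
def _example_histogram_intervals():  # pragma: no cover - illustrative only
    breaks = np.array([0., 0.25, 1.])
    totals = np.array([4., 0.])
    h = histogram_intervals(2, breaks, totals)
    # The first bin [0, 0.5) averages weight 4 over half its width -> 2;
    # the second bin [0.5, 1) only sees the zero-weight piece -> 0.
    assert np.allclose(h, [2., 0.])
    return h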
|
bca752d42ff289f3819f1f2ddc33e3eb32119afce14784323cd47073646c37a6 | """
Table property for providing information about a table.
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import os
from contextlib import contextmanager
from inspect import isclass
import numpy as np
from astropy.utils.data_info import DataInfo
__all__ = ['table_info', 'TableInfo', 'serialize_method_as']
def table_info(tbl, option='attributes', out=''):
"""
Write summary information about the table columns to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: basic column meta data like ``dtype`` or ``format``
- ``stats``: basic statistics: minimum, mean, and maximum
If a function is specified then that function will be called with the
column as its single argument. The function must return an OrderedDict
containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table.table_helpers import simple_table
>>> t = simple_table(size=2, kinds='if')
>>> t['a'].unit = 'm'
>>> t.info()
<Table length=2>
name dtype unit
---- ------- ----
a int64 m
b float64
>>> t.info('stats')
<Table length=2>
name mean std min max
---- ---- --- --- ---
a 1.5 0.5 1 2
b 1.5 0.5 1 2
Parameters
----------
option : str, callable, list of (str or callable)
Info option, defaults to 'attributes'.
out : file-like, None
Output destination, default is sys.stdout. If None then a
Table with information attributes is returned
Returns
-------
info : `~astropy.table.Table` if out==None else None
"""
from .table import Table
if out == '':
out = sys.stdout
descr_vals = [tbl.__class__.__name__]
if tbl.masked:
descr_vals.append('masked=True')
descr_vals.append(f'length={len(tbl)}')
outlines = ['<' + ' '.join(descr_vals) + '>']
cols = list(tbl.columns.values())
if tbl.colnames:
infos = []
for col in cols:
infos.append(col.info(option, out=None))
info = Table(infos, names=list(infos[0]))
else:
info = Table()
if out is None:
return info
    # Since info is going to a filehandle for viewing, remove uninteresting
    # columns.
if 'class' in info.colnames:
# Remove 'class' info column if all table columns are the same class
# and they are the default column class for that table.
uniq_types = {type(col) for col in cols}
if len(uniq_types) == 1 and isinstance(cols[0], tbl.ColumnClass):
del info['class']
if 'n_bad' in info.colnames and np.all(info['n_bad'] == 0):
del info['n_bad']
    # Standard attributes have 'length' but this is typically redundant
if 'length' in info.colnames and np.all(info['length'] == len(tbl)):
del info['length']
for name in info.colnames:
if info[name].dtype.kind in 'SU' and np.all(info[name] == ''):
del info[name]
if tbl.colnames:
outlines.extend(info.pformat(max_width=-1, max_lines=-1, show_unit=False))
else:
outlines.append('<No columns>')
out.writelines(outline + os.linesep for outline in outlines)
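# Illustrative sketch (not part of astropy): with out=None, table_info returns
# the summary as a Table rather than printing it, so it can be inspected or
# filtered programmatically. simple_table is the helper already used in the
# docstring examples above.
def _example_table_info_as_table():
    from astropy.table.table_helpers import simple_table
    t = simple_table(size=3, kinds='if')
    info_tbl = table_info(t, option='attributes', out=None)
    return info_tbl['name', 'dtype']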
class TableInfo(DataInfo):
def __call__(self, option='attributes', out=''):
return table_info(self._parent, option, out)
__call__.__doc__ = table_info.__doc__
@contextmanager
def serialize_method_as(tbl, serialize_method):
"""Context manager to temporarily override individual
column info.serialize_method dict values. The serialize_method
attribute is an optional dict which might look like ``{'fits':
'jd1_jd2', 'ecsv': 'formatted_value', ..}``.
    ``serialize_method`` is a str or dict.  If a str, then it is the
    ``serialize_method`` that will be used for all formats.
If dict then the key values can be either:
- Column name. This has higher precedence than the second option of
matching class.
- Class (matches any column which is an instance of the class)
This context manager is expected to be used only within ``Table.write``.
It could have been a private method on Table but prefer not to add
clutter to that class.
Parameters
----------
tbl : Table object
Input table
serialize_method : dict, str
Dict with key values of column names or types, or str
Returns
-------
None (context manager)
"""
def get_override_sm(col):
"""
Determine if the ``serialize_method`` str or dict specifies an
override of column presets for ``col``. Returns the matching
serialize_method value or ``None``.
"""
# If a string then all columns match
if isinstance(serialize_method, str):
return serialize_method
# If column name then return that serialize_method
if col.info.name in serialize_method:
return serialize_method[col.info.name]
# Otherwise look for subclass matches
for key in serialize_method:
if isclass(key) and isinstance(col, key):
return serialize_method[key]
return None
# Setup for the context block. Set individual column.info.serialize_method
# values as appropriate and keep a backup copy. If ``serialize_method``
# is None or empty then don't do anything.
# Original serialize_method dict, keyed by column name. This only
# gets used and set if there is an override.
original_sms = {}
if serialize_method:
# Go through every column and if it has a serialize_method info
# attribute then potentially update it for the duration of the write.
for col in tbl.itercols():
if hasattr(col.info, 'serialize_method'):
override_sm = get_override_sm(col)
if override_sm:
# Make a reference copy of the column serialize_method
# dict which maps format (e.g. 'fits') to the
# appropriate method (e.g. 'data_mask').
original_sms[col.info.name] = col.info.serialize_method
# Set serialize method for *every* available format. This is
# brute force, but at this point the format ('fits', 'ecsv', etc)
# is not actually known (this gets determined by the write function
# in registry.py). Note this creates a new temporary dict object
# so that the restored version is the same original object.
col.info.serialize_method = {fmt: override_sm
for fmt in col.info.serialize_method}
# Finally yield for the context block
try:
yield
finally:
# Teardown (restore) for the context block. Be sure to do this even
# if an exception occurred.
if serialize_method:
for name, original_sm in original_sms.items():
tbl[name].info.serialize_method = original_sm
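# Illustrative sketch (not part of astropy): inside the context every column
# that carries an info.serialize_method dict is forced to a single method for
# all formats; the original dicts are restored on exit. 'formatted_value' is
# just one plausible method name used as an example here.
def _example_serialize_method_as(tbl):
    with serialize_method_as(tbl, 'formatted_value'):
        overridden = {name: tbl[name].info.serialize_method
                      for name in tbl.colnames
                      if hasattr(tbl[name].info, 'serialize_method')}
    return overridden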
|
13480af1696a9b56f3bc00815d4b2540e7ddd3d8b2daefda408ad68e4c6a76a7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The Index class can use several implementations as its
engine. Any implementation should implement the following:
__init__(data, row_index) : initialize index based on key/row list pairs
add(key, row) -> None : add (key, row) to existing data
remove(key, data=None) -> boolean : remove data from self[key], or all of
self[key] if data is None
shift_left(row) -> None : decrement row numbers after row
shift_right(row) -> None : increase row numbers >= row
find(key) -> list : list of rows corresponding to key
range(lower, upper, bounds) -> list : rows in self[k] where k is between
lower and upper (<= or < based on bounds)
sort() -> None : make row order align with key order
sorted_data() -> list of rows in sorted order (by key)
replace_rows(row_map) -> None : replace row numbers based on slice
items() -> list of tuples of the form (key, data)
Notes
-----
When a Table is initialized from another Table, indices are
(deep) copied and their columns are set to the columns of the new Table.
Column creation:
Column(c) -> deep copy of indices
c[[1, 2]] -> deep copy and reordering of indices
c[1:2] -> reference
array.view(Column) -> no indices
"""
from copy import deepcopy
import numpy as np
from .bst import MinValue, MaxValue
from .sorted_array import SortedArray
class QueryError(ValueError):
'''
Indicates that a given index cannot handle the supplied query.
'''
pass
class Index:
'''
The Index class makes it possible to maintain indices
on columns of a Table, so that column values can be queried
quickly and efficiently. Column values are stored in lexicographic
sorted order, which allows for binary searching in O(log n).
Parameters
----------
columns : list or None
List of columns on which to create an index. If None,
create an empty index for purposes of deep copying.
engine : type, instance, or None
Indexing engine class to use (from among SortedArray, BST,
and SCEngine) or actual engine instance.
If the supplied argument is None (by default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
def __init__(self, columns, engine=None, unique=False):
# Local imports to avoid import problems.
from .table import Table, Column
from astropy.time import Time
if columns is not None:
columns = list(columns)
if engine is not None and not isinstance(engine, type):
# create from data
self.engine = engine.__class__
self.data = engine
self.columns = columns
return
# by default, use SortedArray
self.engine = engine or SortedArray
if columns is None: # this creates a special exception for deep copying
columns = []
data = []
row_index = []
elif len(columns) == 0:
raise ValueError("Cannot create index without at least one column")
elif len(columns) == 1:
col = columns[0]
row_index = Column(col.argsort())
data = Table([col[row_index]])
else:
num_rows = len(columns[0])
# replace Time columns with approximate form and remainder
new_columns = []
for col in columns:
if isinstance(col, Time):
new_columns.append(col.jd)
remainder = col - col.__class__(col.jd, format='jd', scale=col.scale)
new_columns.append(remainder.jd)
else:
new_columns.append(col)
# sort the table lexicographically and keep row numbers
table = Table(columns + [np.arange(num_rows)], copy_indices=False)
sort_columns = new_columns[::-1]
try:
lines = table[np.lexsort(sort_columns)]
except TypeError: # arbitrary mixins might not work with lexsort
lines = table[table.argsort()]
data = lines[lines.colnames[:-1]]
row_index = lines[lines.colnames[-1]]
self.data = self.engine(data, row_index, unique=unique)
self.columns = columns
def __len__(self):
'''
Number of rows in index.
'''
return len(self.columns[0])
def replace_col(self, prev_col, new_col):
'''
Replace an indexed column with an updated reference.
Parameters
----------
prev_col : Column
Column reference to replace
new_col : Column
New column reference
'''
self.columns[self.col_position(prev_col.info.name)] = new_col
def reload(self):
'''
Recreate the index based on data in self.columns.
'''
self.__init__(self.columns, engine=self.engine)
def col_position(self, col_name):
'''
Return the position of col_name in self.columns.
Parameters
----------
col_name : str
Name of column to look up
'''
for i, c in enumerate(self.columns):
if c.info.name == col_name:
return i
raise ValueError(f"Column does not belong to index: {col_name}")
def insert_row(self, pos, vals, columns):
'''
Insert a new row from the given values.
Parameters
----------
pos : int
Position at which to insert row
vals : list or tuple
List of values to insert into a new row
columns : list
Table column references
'''
key = [None] * len(self.columns)
for i, col in enumerate(columns):
try:
key[self.col_position(col.info.name)] = vals[i]
except ValueError: # not a member of index
continue
num_rows = len(self.columns[0])
if pos < num_rows:
# shift all rows >= pos to the right
self.data.shift_right(pos)
self.data.add(tuple(key), pos)
def get_row_specifier(self, row_specifier):
'''
Return an iterable corresponding to the
input row specifier.
Parameters
----------
row_specifier : int, list, ndarray, or slice
'''
if isinstance(row_specifier, (int, np.integer)):
# single row
return (row_specifier,)
elif isinstance(row_specifier, (list, np.ndarray)):
return row_specifier
elif isinstance(row_specifier, slice):
col_len = len(self.columns[0])
return range(*row_specifier.indices(col_len))
raise ValueError("Expected int, array of ints, or slice but "
"got {} in remove_rows".format(row_specifier))
def remove_rows(self, row_specifier):
'''
Remove the given rows from the index.
Parameters
----------
row_specifier : int, list, ndarray, or slice
Indicates which row(s) to remove
'''
rows = []
# To maintain the correct row order, we loop twice,
# deleting rows first and then reordering the remaining rows
for row in self.get_row_specifier(row_specifier):
self.remove_row(row, reorder=False)
rows.append(row)
# second pass - row order is reversed to maintain
# correct row numbers
for row in reversed(sorted(rows)):
self.data.shift_left(row)
def remove_row(self, row, reorder=True):
'''
Remove the given row from the index.
Parameters
----------
row : int
Position of row to remove
reorder : bool
Whether to reorder indices after removal
'''
# for removal, form a key consisting of column values in this row
if not self.data.remove(tuple(col[row] for col in self.columns), row):
raise ValueError(f"Could not remove row {row} from index")
# decrement the row number of all later rows
if reorder:
self.data.shift_left(row)
def find(self, key):
'''
Return the row values corresponding to key, in sorted order.
Parameters
----------
key : tuple
Values to search for in each column
'''
return self.data.find(key)
def same_prefix(self, key):
'''
Return rows whose keys contain the supplied key as a prefix.
Parameters
----------
key : tuple
Prefix for which to search
'''
return self.same_prefix_range(key, key, (True, True))
def same_prefix_range(self, lower, upper, bounds=(True, True)):
'''
Return rows whose keys have a prefix in the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
n = len(lower)
ncols = len(self.columns)
a = MinValue() if bounds[0] else MaxValue()
b = MaxValue() if bounds[1] else MinValue()
# [x, y] search corresponds to [(x, min), (y, max)]
# (x, y) search corresponds to ((x, max), (x, min))
lower = lower + tuple((ncols - n) * [a])
upper = upper + tuple((ncols - n) * [b])
return self.data.range(lower, upper, bounds)
def range(self, lower, upper, bounds=(True, True)):
'''
Return rows within the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
return self.data.range(lower, upper, bounds)
def replace(self, row, col_name, val):
'''
Replace the value of a column at a given position.
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
'''
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
'''
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
col_slice : list
Indices to slice
'''
row_map = {row: i for i, row in enumerate(col_slice)}
self.data.replace_rows(row_map)
def sort(self):
'''
Make row numbers follow the same sort order as the keys
of the index.
'''
self.data.sort()
def sorted_data(self):
'''
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
'''
return self.data.sorted_data()
def __getitem__(self, item):
'''
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
'''
return SlicedIndex(self, item)
def __repr__(self):
col_names = tuple(col.info.name for col in self.columns)
return f'<{self.__class__.__name__} columns={col_names} data={self.data}>'
def __deepcopy__(self, memo):
'''
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
'''
# Bypass Index.__new__ to create an actual Index, not a SlicedIndex.
index = super().__new__(self.__class__)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
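# Illustrative sketch (not part of astropy): Index objects are normally built
# through Table.add_index rather than constructed directly. The column names
# and values below are made-up examples.
def _example_add_index():
    from .table import Table
    t = Table({'a': [2, 5, 3], 'b': [1., 2., 3.]})
    t.add_index('a')   # creates an Index (SortedArray engine by default)
    return t.loc[3]    # binary search on the indexed column 'a'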
class SlicedIndex:
'''
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : tuple, slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
'''
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
elif isinstance(index_slice, slice): # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
else:
raise TypeError('index_slice must be tuple or slice')
@property
def length(self):
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
'''
The stopping position of the slice, or the end of the
index if this is an original slice.
'''
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
'''
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
'''
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
'''
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
'''
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
'''
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
'''
return row if self.original else self.start + row * self.step
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def get_index_or_copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.get_index_or_copy().insert_row(self.orig_coords(pos), vals, columns)
def get_row_specifier(self, row_specifier):
return [self.orig_coords(x) for x in
self.index.get_row_specifier(row_specifier)]
def remove_rows(self, row_specifier):
if not self._frozen:
self.get_index_or_copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.get_index_or_copy().sort()
def __repr__(self):
slice_str = '' if self.original else f' slice={self.start}:{self.stop}:{self.step}'
return (f'<{self.__class__.__name__} original={self.original}{slice_str}'
f' index={self.index}>')
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
'''
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
'''
from .table import Table
if len(self.columns) == 1:
index = Index([col_slice], engine=self.data.__class__)
return self.__class__(index, slice(0, 0, None), original=True)
t = Table(self.columns, copy_indices=False)
with t.index_mode('discard_on_copy'):
new_cols = t[item].columns.values()
index = Index(new_cols, engine=self.data.__class__)
return self.__class__(index, slice(0, 0, None), original=True)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
def get_index(table, table_copy=None, names=None):
"""
    Input a table and either a subset of its columns as ``table_copy`` or a
    list/tuple of column names as ``names``, and return the index
    corresponding to that subset of columns, or None if no such index exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`, optional
Subset of the columns in the ``table`` argument
names : list, tuple, optional
Subset of column names in the ``table`` argument
Returns
-------
Index of columns or None
"""
if names is not None and table_copy is not None:
raise ValueError('one and only one argument from "table_copy" or'
' "names" is required')
if names is None and table_copy is None:
raise ValueError('one and only one argument from "table_copy" or'
' "names" is required')
if names is not None:
names = set(names)
else:
names = set(table_copy.colnames)
if not names <= set(table.colnames):
raise ValueError(f'{names} is not a subset of table columns')
for name in names:
for index in table[name].info.indices:
if {col.info.name for col in index.columns} == names:
return index
return None
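# Illustrative sketch (not part of astropy): get_index returns an existing
# index that covers exactly the requested columns. The column names below are
# made-up examples.
def _example_get_index():
    from .table import Table
    t = Table({'a': [1, 2], 'b': [3, 4]})
    t.add_index('a')
    return get_index(t, names=['a'])   # the Index on 'a', or None if no match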
def get_index_by_names(table, names):
'''
Returns an index in ``table`` corresponding to the ``names`` columns or None
if no such index exists.
Parameters
----------
table : `Table`
Input table
    names : tuple, list
Column names
'''
names = list(names)
for index in table.indices:
index_names = [col.info.name for col in index.columns]
if index_names == names:
return index
else:
return None
class _IndexModeContext:
'''
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
'''
_col_subclasses = {}
def __init__(self, table, mode):
'''
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ('freeze', 'discard_on_copy', 'copy_on_getitem'):
raise ValueError("Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{}'".format(mode))
def __enter__(self):
if self.mode == 'discard_on_copy':
self.table._copy_indices = False
elif self.mode == 'copy_on_getitem':
for col in self.table.columns.values():
self._orig_classes.append(col.__class__)
col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
else:
for index in self.table.indices:
index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == 'discard_on_copy':
self.table._copy_indices = True
elif self.mode == 'copy_on_getitem':
for col in reversed(self.table.columns.values()):
col.__class__ = self._orig_classes.pop()
else:
for index in self.table.indices:
index._frozen = False
index.reload()
def _get_copy_on_getitem_shim(self, cls):
"""
This creates a subclass of the column's class which overrides that
class's ``__getitem__``, such that when returning a slice of the
column, the relevant indices are also copied over to the slice.
Ideally, rather than shimming in a new ``__class__`` we would be able
to just flip a flag that is checked by the base class's
``__getitem__``. Unfortunately, since the flag needs to be a Python
variable, this slows down ``__getitem__`` too much in the more common
case where a copy of the indices is not needed. See the docstring for
``astropy.table._column_mixins`` for more information on that.
"""
if cls in self._col_subclasses:
return self._col_subclasses[cls]
def __getitem__(self, item):
value = cls.__getitem__(self, item)
if type(value) is type(self):
value = self.info.slice_indices(value, item, len(self))
return value
clsname = f'_{cls.__name__}WithIndexCopy'
new_cls = type(str(clsname), (cls,), {'__getitem__': __getitem__})
self._col_subclasses[cls] = new_cls
return new_cls
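# Illustrative sketch (not part of astropy): the index modes above are exposed
# through Table.index_mode. This assumes ``t`` is a Table with an index on a
# (made-up) column 'a'.
def _example_index_modes(t):
    # 'freeze': bulk-modify an indexed column; indices refresh on exit
    with t.index_mode('freeze'):
        t['a'][:] = t['a'] + 1
    # 'discard_on_copy': slices taken inside the block do not carry indices
    with t.index_mode('discard_on_copy'):
        t_slice = t[:2]
    return t_slice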
class TableIndices(list):
'''
A special list of table indices allowing
for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
'''
def __init__(self, lst):
super().__init__(lst)
def __getitem__(self, item):
'''
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
'''
if isinstance(item, str):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError(f"No index found for {item}")
return super().__getitem__(item)
class TableLoc:
"""
A pseudo-list of Table rows allowing for retrieval
of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
"""
def __init__(self, table):
self.table = table
self.indices = table.indices
if len(self.indices) == 0:
raise ValueError("Cannot create TableLoc object with no indices")
def _get_rows(self, item):
"""
        Retrieve Table row indices by value slice.
"""
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
if len(index.columns) > 1:
raise ValueError("Cannot use .loc on multi-column indices")
if isinstance(item, slice):
# None signifies no upper/lower bound
start = MinValue() if item.start is None else item.start
stop = MaxValue() if item.stop is None else item.stop
rows = index.range((start,), (stop,))
else:
if not isinstance(item, (list, np.ndarray)): # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for key in item:
p = index.find((key,))
if len(p) == 0:
raise KeyError(f'No matches found for key {key}')
else:
rows.extend(p)
return rows
def __getitem__(self, item):
"""
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError(f'No matches found for key {item}')
elif len(rows) == 1: # single row
return self.table[rows[0]]
return self.table[rows]
def __setitem__(self, key, value):
"""
        Assign Table rows by value slice.
Parameters
----------
key : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
value : New values of the row elements.
Can be a list of tuples/lists to update the row.
"""
rows = self._get_rows(key)
if len(rows) == 0: # no matches found
raise KeyError(f'No matches found for key {key}')
elif len(rows) == 1: # single row
self.table[rows[0]] = value
else: # multiple rows
if len(rows) == len(value):
for row, val in zip(rows, value):
self.table[row] = val
else:
raise ValueError(f'Right side should contain {len(rows)} values')
class TableLocIndices(TableLoc):
def __getitem__(self, item):
"""
        Retrieve Table row indices by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError(f'No matches found for key {item}')
elif len(rows) == 1: # single row
return rows[0]
return rows
class TableILoc(TableLoc):
'''
A variant of TableLoc allowing for row retrieval by
indexed order rather than data values.
Parameters
----------
table : Table
Indexed table to use
'''
def __init__(self, table):
super().__init__(table)
def __getitem__(self, item):
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
rows = index.sorted_data()[item]
table_slice = self.table[rows]
if len(table_slice) == 0: # no matches found
raise IndexError(f'Invalid index for iloc: {item}')
return table_slice
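# Illustrative sketch (not part of astropy): the .loc / .iloc access patterns
# implemented by the classes above. Column names and values are made up.
def _example_loc_iloc():
    from .table import Table
    t = Table({'name': ['c', 'a', 'b'], 'val': [3, 1, 2]})
    t.add_index('name')
    by_value = t.loc['a':'b']   # value slice; both endpoints are included
    by_order = t.iloc[0]        # first row in index-sorted order ('a')
    return by_value, by_order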
|
b2dff52a059c47dfea519e01040c91a39202ee66927cf2455b94a95eb57db5df | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from os.path import abspath, dirname, join
from .table import Table
import astropy.io.registry as io_registry
import astropy.config as _config
from astropy import extern
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.table.jsviewer`.
"""
jquery_url = _config.ConfigItem(
'https://code.jquery.com/jquery-3.6.0.min.js',
'The URL to the jquery library.')
datatables_url = _config.ConfigItem(
'https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
'The URL to the jquery datatables library.')
css_urls = _config.ConfigItem(
['https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css'],
'The URLs to the css file(s) to include.', cfgtype='string_list')
conf = Conf()
EXTERN_JS_DIR = abspath(join(dirname(extern.__file__), 'jquery', 'data', 'js'))
EXTERN_CSS_DIR = abspath(join(dirname(extern.__file__), 'jquery', 'data', 'css'))
_SORTING_SCRIPT_PART_1 = """
var astropy_sort_num = function(a, b) {{
var a_num = parseFloat(a);
var b_num = parseFloat(b);
if (isNaN(a_num) && isNaN(b_num))
return ((a < b) ? -1 : ((a > b) ? 1 : 0));
else if (!isNaN(a_num) && !isNaN(b_num))
return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0));
else
return isNaN(a_num) ? -1 : 1;
}}
"""
_SORTING_SCRIPT_PART_2 = """
jQuery.extend( jQuery.fn.dataTableExt.oSort, {{
"optionalnum-asc": astropy_sort_num,
"optionalnum-desc": function (a,b) {{ return -astropy_sort_num(a, b); }}
}});
"""
IPYNB_JS_SCRIPT = """
<script>
%(sorting_script1)s
require.config({{paths: {{
datatables: '{datatables_url}'
}}}});
require(["datatables"], function(){{
console.log("$('#{tid}').dataTable()");
%(sorting_script2)s
$('#{tid}').dataTable({{
order: [],
pageLength: {display_length},
lengthMenu: {display_length_menu},
pagingType: "full_numbers",
columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}]
}});
}});
</script>
""" % dict(sorting_script1=_SORTING_SCRIPT_PART_1,
sorting_script2=_SORTING_SCRIPT_PART_2)
HTML_JS_SCRIPT = _SORTING_SCRIPT_PART_1 + _SORTING_SCRIPT_PART_2 + """
$(document).ready(function() {{
$('#{tid}').dataTable({{
order: [],
pageLength: {display_length},
lengthMenu: {display_length_menu},
pagingType: "full_numbers",
columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}]
}});
}} );
"""
# Default CSS for the JSViewer writer
DEFAULT_CSS = """\
body {font-family: sans-serif;}
table.dataTable {width: auto !important; margin: 0 !important;}
.dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em}
"""
# Default CSS used when rendering a table in the IPython notebook
DEFAULT_CSS_NB = """\
table.dataTable {clear: both; width: auto !important; margin: 0 !important;}
.dataTables_info, .dataTables_length, .dataTables_filter, .dataTables_paginate{
display: inline-block; margin-right: 1em; }
.paginate_button { margin-right: 5px; }
"""
class JSViewer:
"""Provides an interactive HTML export of a Table.
This class provides an interface to the `DataTables
    <https://datatables.net/>`_ library, which allows an HTML table to be
    visualized interactively. It is used by the `~astropy.table.Table.show_in_browser`
method.
Parameters
----------
use_local_files : bool, optional
Use local files or a CDN for JavaScript libraries. Default False.
display_length : int, optional
        Number of rows to show. Default is 50.
"""
def __init__(self, use_local_files=False, display_length=50):
self._use_local_files = use_local_files
self.display_length_menu = [[10, 25, 50, 100, 500, 1000, -1],
[10, 25, 50, 100, 500, 1000, "All"]]
self.display_length = display_length
for L in self.display_length_menu:
if display_length not in L:
L.insert(0, display_length)
@property
def jquery_urls(self):
if self._use_local_files:
return ['file://' + join(EXTERN_JS_DIR, 'jquery-3.6.0.min.js'),
'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min.js')]
else:
return [conf.jquery_url, conf.datatables_url]
@property
def css_urls(self):
if self._use_local_files:
return ['file://' + join(EXTERN_CSS_DIR,
'jquery.dataTables.css')]
else:
return conf.css_urls
def _jstable_file(self):
if self._use_local_files:
return 'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min')
else:
return conf.datatables_url[:-3]
def ipynb(self, table_id, css=None, sort_columns='[]'):
html = f'<style>{css if css is not None else DEFAULT_CSS_NB}</style>'
html += IPYNB_JS_SCRIPT.format(
display_length=self.display_length,
display_length_menu=self.display_length_menu,
datatables_url=self._jstable_file(),
tid=table_id, sort_columns=sort_columns)
return html
def html_js(self, table_id='table0', sort_columns='[]'):
return HTML_JS_SCRIPT.format(
display_length=self.display_length,
display_length_menu=self.display_length_menu,
tid=table_id, sort_columns=sort_columns).strip()
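# Illustrative sketch (not part of astropy): JSViewer is normally driven by
# Table.show_in_browser / show_in_notebook, but its pieces can be inspected
# directly. 'tbl0' is a made-up table id.
def _example_jsviewer_snippets():
    jsv = JSViewer(display_length=25)
    urls = jsv.jquery_urls + jsv.css_urls            # CDN URLs by default
    script = jsv.html_js(table_id='tbl0', sort_columns='[0, 1]')
    return urls, script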
def write_table_jsviewer(table, filename, table_id=None, max_lines=5000,
table_class="display compact", jskwargs=None,
css=DEFAULT_CSS, htmldict=None, overwrite=False):
if table_id is None:
table_id = f'table{id(table)}'
jskwargs = jskwargs or {}
jsv = JSViewer(**jskwargs)
sortable_columns = [i for i, col in enumerate(table.columns.values())
if col.info.dtype.kind in 'iufc']
html_options = {
'table_id': table_id,
'table_class': table_class,
'css': css,
'cssfiles': jsv.css_urls,
'jsfiles': jsv.jquery_urls,
'js': jsv.html_js(table_id=table_id, sort_columns=sortable_columns)
}
if htmldict:
html_options.update(htmldict)
if max_lines < len(table):
table = table[:max_lines]
table.write(filename, format='html', htmldict=html_options,
overwrite=overwrite)
io_registry.register_writer('jsviewer', Table, write_table_jsviewer)
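# Illustrative sketch (not part of astropy): once the 'jsviewer' writer is
# registered above, an interactive HTML page can be produced directly through
# Table.write. The filename and column values are made-up examples.
def _example_write_jsviewer():
    t = Table({'a': [1, 2, 3], 'b': [4., 5., 6.]})
    t.write('example_table.html', format='jsviewer',
            jskwargs={'display_length': 10}, overwrite=True)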
|
8204912dcabfd5b979127115e5ae6b008547d0b34fec4ee53cbaafdeef58ebd8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .index import SlicedIndex, TableIndices, TableLoc, TableILoc, TableLocIndices
import sys
from collections import OrderedDict, defaultdict
from collections.abc import Mapping
import warnings
from copy import deepcopy
import types
import itertools
import weakref
import numpy as np
from numpy import ma
from astropy import log
from astropy.units import Quantity, QuantityInfo
from astropy.utils import isiterable, ShapedLikeNDArray
from astropy.utils.console import color_print
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
from astropy.utils.metadata import MetaData, MetaAttribute
from astropy.utils.data_info import BaseColumnInfo, MixinInfo, DataInfo
from astropy.utils.decorators import format_doc
from astropy.io.registry import UnifiedReadWriteMethod
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy, _convert_sequence_data_to_array)
from .row import Row
from .info import TableInfo
from .index import Index, _IndexModeContext, get_index
from .connect import TableRead, TableWrite
from .ndarray_mixin import NdarrayMixin
from .mixins.registry import get_mixin_handler
from . import conf
_implementation_notes = """
This string has informal notes concerning Table implementation for developers.
Things to remember:
- Table has customizable attributes ColumnClass, Column, MaskedColumn.
Table.Column is normally just column.Column (same w/ MaskedColumn)
but in theory they can be different. Table.ColumnClass is the default
class used to create new non-mixin columns, and this is a function of
the Table.masked attribute. Column creation / manipulation in a Table
needs to respect these.
- Column objects that get inserted into the Table.columns attribute must
have the info.parent_table attribute set correctly. Beware just dropping
an object into the columns dict since an existing column may
be part of another Table and have parent_table set to point at that
table. Dropping that column into `columns` of this Table will cause
a problem for the old one so the column object needs to be copied (but
not necessarily the data).
Currently replace_column is always making a copy of both object and
data if parent_table is set. This could be improved but requires a
generic way to copy a mixin object but not the data.
- Be aware of column objects that have indices set.
- `cls.ColumnClass` is a property that effectively uses the `masked` attribute
to choose either `cls.Column` or `cls.MaskedColumn`.
"""
__doctest_skip__ = ['Table.read', 'Table.write', 'Table._read',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
__doctest_requires__ = {'*pandas': ['pandas>=1.1']}
_pprint_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of lines in table output.
max_width : int or None
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
_pformat_docs = """
{__doc__}
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is True.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or None
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
"""Check if the object's info is an instance of cls."""
# We check info on the class of the instance, since on the instance
# itself accessing 'info' has side effects in that it sets
# obj.__dict__['info'] if it does not exist already.
return isinstance(getattr(obj.__class__, 'info', None), cls)
def _get_names_from_list_of_dict(rows):
"""Return list of column names if ``rows`` is a list of dict that
defines table data.
If rows is not a list of dict then return None.
"""
if rows is None:
return None
names = set()
for row in rows:
if not isinstance(row, Mapping):
return None
names.update(row)
return list(names)
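# Illustrative check (not part of astropy): _get_names_from_list_of_dict only
# returns column names when every row is a mapping (the order of the returned
# names is not guaranteed). The dicts below are made-up examples.
def _example_names_from_rows():
    names = _get_names_from_list_of_dict([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
    mixed = _get_names_from_list_of_dict([{'a': 1}, (1, 2)])   # -> None
    return names, mixed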
# Note to future maintainers: when transitioning this to dict
# be sure to change the OrderedDict ref(s) in Row and in __len__().
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return list(self.values())[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return list(self.values())[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value, validated=False):
"""
Set item in this dict instance, but do not allow directly replacing an
existing column unless it is already validated (and thus is certain to
not corrupt the table).
NOTE: it is easily possible to corrupt a table by directly *adding* a new
key to the TableColumns attribute of a Table, e.g.
``t.columns['jane'] = 'doe'``.
"""
if item in self and not validated:
raise ValueError("Cannot replace column '{}'. Use Table.replace_column() instead."
.format(item))
super().__setitem__(item, value)
def __repr__(self):
names = (f"'{x}'" for x in self.keys())
return f"<{self.__class__.__name__} names=({','.join(names)})>"
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError(f"Column {new_name} already exists")
# Rename column names in pprint include/exclude attributes as needed
parent_table = self[name].info.parent_table
if parent_table is not None:
parent_table.pprint_exclude_names._rename(name, new_name)
parent_table.pprint_include_names._rename(name, new_name)
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
def __delitem__(self, name):
# Remove column names from pprint include/exclude attributes as needed.
# __delitem__ also gets called for pop() and popitem().
parent_table = self[name].info.parent_table
if parent_table is not None:
# _remove() method does not require that `name` is in the attribute
parent_table.pprint_exclude_names._remove(name)
parent_table.pprint_include_names._remove(name)
return super().__delitem__(name)
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple thereof
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of `Column`
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class TableAttribute(MetaAttribute):
"""
Descriptor to define a custom attribute for a Table subclass.
The value of the ``TableAttribute`` will be stored in a dict named
``__attributes__`` that is stored in the table ``meta``. The attribute
can be accessed and set in the usual way, and it can be provided when
creating the object.
Defining an attribute by this mechanism ensures that it will persist if
the table is sliced or serialized, for example as a pickle or ECSV file.
See the `~astropy.utils.metadata.MetaAttribute` documentation for additional
details.
Parameters
----------
default : object
Default value for attribute
Examples
--------
>>> from astropy.table import Table, TableAttribute
>>> class MyTable(Table):
... identifier = TableAttribute(default=1)
>>> t = MyTable(identifier=10)
>>> t.identifier
10
>>> t.meta
OrderedDict([('__attributes__', {'identifier': 10})])
"""
class PprintIncludeExclude(TableAttribute):
"""Maintain tuple that controls table column visibility for print output.
This is a descriptor that inherits from MetaAttribute so that the attribute
value is stored in the table meta['__attributes__'].
This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table
attributes.
"""
def __get__(self, instance, owner_cls):
"""Get the attribute.
This normally returns an instance of this class which is stored on the
owner object.
"""
# For getting from class not an instance
if instance is None:
return self
# If not already stored on `instance`, make a copy of the class
# descriptor object and put it onto the instance.
value = instance.__dict__.get(self.name)
if value is None:
value = deepcopy(self)
instance.__dict__[self.name] = value
# We set _instance_ref on every call, since if one makes copies of
# instances, this attribute will be copied as well, which will lose the
# reference.
value._instance_ref = weakref.ref(instance)
return value
def __set__(self, instance, names):
"""Set value of ``instance`` attribute to ``names``.
Parameters
----------
instance : object
Instance that owns the attribute
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
if isinstance(names, str):
names = [names]
if names is None:
# Remove attribute value from the meta['__attributes__'] dict.
# Subsequent access will just return None.
delattr(instance, self.name)
else:
# This stores names into instance.meta['__attributes__'] as tuple
return super().__set__(instance, tuple(names))
def __call__(self):
"""Get the value of the attribute.
Returns
-------
names : None, tuple
Include/exclude names
"""
# Get the value from instance.meta['__attributes__']
instance = self._instance_ref()
return super().__get__(instance, instance.__class__)
def __repr__(self):
if hasattr(self, '_instance_ref'):
out = f'<{self.__class__.__name__} name={self.name} value={self()}>'
else:
out = super().__repr__()
return out
def _add_remove_setup(self, names):
"""Common setup for add and remove.
- Coerce attribute value to a list
- Coerce names into a list
- Get the parent table instance
"""
names = [names] if isinstance(names, str) else list(names)
# Get the value. This is the same as self() but we need `instance` here.
instance = self._instance_ref()
value = super().__get__(instance, instance.__class__)
value = [] if value is None else list(value)
return instance, names, value
def add(self, names):
"""Add ``names`` to the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to add
"""
instance, names, value = self._add_remove_setup(names)
value.extend(name for name in names if name not in value)
super().__set__(instance, tuple(value))
def remove(self, names):
"""Remove ``names`` from the include/exclude attribute.
Parameters
----------
names : str, list, tuple
Column name(s) to remove
"""
self._remove(names, raise_exc=True)
def _remove(self, names, raise_exc=False):
"""Remove ``names`` with optional checking if they exist"""
instance, names, value = self._add_remove_setup(names)
# Return now if there are no attributes and thus no action to be taken.
if not raise_exc and '__attributes__' not in instance.meta:
return
# Remove one by one, optionally raising an exception if name is missing.
for name in names:
if name in value:
value.remove(name) # Using the list.remove method
elif raise_exc:
raise ValueError(f'{name} not in {self.name}')
# Change to either None or a tuple for storing back to attribute
value = None if value == [] else tuple(value)
self.__set__(instance, value)
def _rename(self, name, new_name):
"""Rename ``name`` to ``new_name`` if ``name`` is in the list"""
names = self() or ()
if name in names:
new_names = list(names)
new_names[new_names.index(name)] = new_name
self.set(new_names)
def set(self, names):
"""Set value of include/exclude attribute to ``names``.
Parameters
----------
names : None, str, list, tuple
Column name(s) to store, or None to clear
"""
class _Context:
def __init__(self, descriptor_self):
self.descriptor_self = descriptor_self
self.names_orig = descriptor_self()
def __enter__(self):
pass
def __exit__(self, type, value, tb):
descriptor_self = self.descriptor_self
instance = descriptor_self._instance_ref()
descriptor_self.__set__(instance, self.names_orig)
def __repr__(self):
return repr(self.descriptor_self)
ctx = _Context(descriptor_self=self)
instance = self._instance_ref()
self.__set__(instance, names)
return ctx
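# Illustrative sketch (not part of astropy): controlling which columns are
# printed via the pprint_include_names / pprint_exclude_names attributes
# defined above. Column names are made-up examples.
def _example_pprint_names():
    t = Table({'a': [1], 'b': [2], 'c': [3]})
    t.pprint_include_names = ('a', 'b')   # only 'a' and 'b' are printed
    t.pprint_exclude_names.add('b')       # now only 'a' is printed
    with t.pprint_include_names.set(['a', 'c']):
        t.pprint()                        # temporarily shows 'a' and 'c'
    return t.pprint_include_names()       # -> ('a', 'b') after the context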
class Table:
"""A class to represent tables of heterogeneous data.
`~astropy.table.Table` provides a class for heterogeneous tabular data.
A key enhancement provided by the `~astropy.table.Table` class over
e.g. a `numpy` structured array is the ability to easily modify the
structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`~astropy.table.Table` differs from `~astropy.nddata.NDData` by the
assumption that the input data consists of columns of homogeneous data,
where each column has a unique identifier and may contain additional
metadata such as the data unit, format, and description.
See also: https://docs.astropy.org/en/stable/table/
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
units : list, dict, optional
List or dict of units to apply to columns.
descriptions : list, dict, optional
List or dict of descriptions to apply to columns.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
meta = MetaData(copy=False)
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
# Unified I/O read and write methods from .connect
read = UnifiedReadWriteMethod(TableRead)
write = UnifiedReadWriteMethod(TableWrite)
pprint_exclude_names = PprintIncludeExclude()
pprint_include_names = PprintIncludeExclude()
def as_array(self, keep_byteorder=False, names=None):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
        names : list, optional
List of column names to include for returned structured array.
Default is to include all table columns.
Returns
-------
table_array : array or `~numpy.ma.MaskedArray`
Copy of table as a numpy structured array.
ndarray for unmasked or `~numpy.ma.MaskedArray` for masked.
"""
masked = self.masked or self.has_masked_columns or self.has_masked_values
empty_init = ma.empty if masked else np.empty
if len(self.columns) == 0:
return empty_init(0, dtype=None)
dtype = []
cols = self.columns.values()
if names is not None:
cols = [col for col in cols if col.info.name in names]
for col in cols:
col_descr = descr(col)
if not (col.info.dtype.isnative or keep_byteorder):
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
            # For masked output, masked mixin columns need to set the output mask attribute.
if masked and has_info_class(col, MixinInfo) and hasattr(col, 'mask'):
data[col.info.name].mask = col.mask
return data
def __init__(self, data=None, masked=False, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
units=None, descriptions=None,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Specifies list of names found for the case of initializing table with
# a list of dict. If data are not list of dict then this is None.
names_from_list_of_dict = None
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if isinstance(rows, types.GeneratorType):
# Without this then the all(..) test below uses up the generator
rows = list(rows)
# Get column names if `rows` is a list of dict, otherwise this is None
names_from_list_of_dict = _get_names_from_list_of_dict(rows)
if names_from_list_of_dict:
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
data = list(zip(*rows))
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
# Handle custom (subclass) table attributes that are stored in meta.
# These are defined as class attributes using the TableAttribute
# descriptor. Any such attributes get removed from kwargs here and
# stored for use after the table is otherwise initialized. Any values
# provided via kwargs will have precedence over existing values from
# meta (e.g. from data as a Table or meta via kwargs).
meta_table_attrs = {}
if kwargs:
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, TableAttribute):
meta_table_attrs[attr] = kwargs.pop(attr)
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied.
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray)
and data.shape == (0,)
and not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
# Get column names from `data` if it is a list of dict, otherwise this is None.
# This might be previously defined if `rows` was supplied as an init arg.
names_from_list_of_dict = (names_from_list_of_dict
or _get_names_from_list_of_dict(data))
if names_from_list_of_dict:
init_func = self._init_from_list_of_dicts
n_cols = len(names_from_list_of_dict)
else:
init_func = self._init_from_list
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
# If user-input meta is None then use data.meta (if non-trivial)
if meta is None and data.meta:
# At this point do NOT deepcopy data.meta as this will happen after
# table init_func() is called. But for table input the table meta
                # gets a key-only (shallow) copy here if copy=False because later a direct object ref
# is used.
meta = data.meta if copy else data.meta.copy()
# Handle indices on input table. Copy primary key and don't copy indices
# if the input Table is in non-copy mode.
self.primary_key = data.primary_key
self._init_indices = self._init_indices and data._copy_indices
# Extract default names, n_cols, and then overwrite ``data`` to be the
# table columns so we can use _init_from_list.
default_names = data.colnames
n_cols = len(default_names)
data = list(data.columns.values())
init_func = self._init_from_list
elif data is None:
if names is None:
if dtype is None:
# Table was initialized as `t = Table()`. Set up for empty
# table with names=[], data=[], and n_cols=0.
# self._init_from_list() will simply return, giving the
# expected empty table.
names = []
else:
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError(f'Data type {type(data)} not allowed to init Table')
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if dtype is None:
dtype = [None] * n_cols
elif isinstance(dtype, np.dtype):
if default_names is None:
default_names = dtype.names
# Convert a numpy dtype input to a list of dtypes for later use.
dtype = [dtype[name] for name in dtype.names]
if names is None:
names = default_names or [None] * n_cols
names = [None if name is None else str(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Set table meta. If copy=True then deepcopy meta otherwise use the
# user-supplied meta directly.
if meta is not None:
self.meta = deepcopy(meta) if copy else meta
# Update meta with TableAttributes supplied as kwargs in Table init.
# This takes precedence over previously-defined meta.
if meta_table_attrs:
for attr, value in meta_table_attrs.items():
setattr(self, attr, value)
# Whatever happens above, the masked property should be set to a boolean
if self.masked not in (None, True, False):
raise TypeError("masked property must be None, True or False")
self._set_column_attribute('unit', units)
self._set_column_attribute('description', descriptions)
def _set_column_attribute(self, attr, values):
"""Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column
        name name), a Row, or a sequence of values matching the number of columns. This is
        used for handling the ``units`` and ``descriptions`` kwargs to ``__init__``.
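        Examples
        --------
        This is normally invoked indirectly via the ``units`` or ``descriptions``
        keyword arguments of ``__init__``; a minimal illustrative example (the
        column name and unit are arbitrary)::
            >>> t = Table([[1, 2]], names=['a'], units={'a': 'm'})
            >>> t['a'].unit
            Unit("m")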
"""
if not values:
return
if isinstance(values, Row):
# For a Row object transform to an equivalent dict.
values = {name: values[name] for name in values.colnames}
if not isinstance(values, Mapping):
            # If not a Mapping, assume an iterable and map it to a dict if it has the right length
if len(values) != len(self.columns):
raise ValueError(f'sequence of {attr} values must match number of columns')
values = dict(zip(self.colnames, values))
for name, value in values.items():
if name not in self.columns:
raise ValueError(f'invalid column name {name} for setting {attr} attribute')
# Special case: ignore unit if it is an empty or blank string
if attr == 'unit' and isinstance(value, str):
if value.strip() == '':
value = None
if value not in (np.ma.masked, None):
setattr(self[name].info, attr, value)
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked or self.has_masked_columns or self.has_masked_values:
mask_table = Table([getattr(col, 'mask', FalseArray(col.shape))
for col in self.itercols()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
        fill_value : scalar, optional
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : `~astropy.table.Table`
New table with masked values filled
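        Examples
        --------
        A minimal illustrative example (the fill value and data are arbitrary)::
            >>> t = Table([[1, 2], [3, 4]], names=('a', 'b'), masked=True)
            >>> t['a'].mask = [False, True]
            >>> t.filled(-1)['a'].tolist()
            [1, -1]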
"""
if self.masked or self.has_masked_columns or self.has_masked_values:
# Get new columns with masked values filled, then create Table with those
# new cols (copy=False) but deepcopy the meta.
data = [col.filled(fill_value) if hasattr(col, 'filled') else col
for col in self.itercols()]
return self.__class__(data, meta=deepcopy(self.meta), copy=False)
else:
# Return copy of the original object.
return self.copy()
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum(index is x for x in lst) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
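        Examples
        --------
        A minimal illustrative sketch (an index must be added first)::
            >>> t = Table([[1, 2, 3], [10, 20, 30]], names=('a', 'b'))
            >>> t.add_index('a')
            >>> int(t.loc[2]['b'])
            20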
'''
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, either `~astropy.table.SortedArray`,
`~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied
argument is None (by default), use `~astropy.table.SortedArray`.
unique : bool
Whether the values of the index must be unique. Default is False.
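        Examples
        --------
        A minimal illustrative example (column name and values are arbitrary)::
            >>> t = Table([[2, 1, 3]], names=['a'])
            >>> t.add_index('a')
            >>> t.primary_key
            ('a',)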
'''
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{}", of '
'type "{}"'.format(col.info.name, type(col)))
is_primary = not self.indices
index = Index(columns, engine=engine, unique=unique)
sliced_index = SlicedIndex(index, slice(0, 0, None), original=True)
if is_primary:
self.primary_key = colnames
for col in columns:
col.info.indices.append(sliced_index)
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
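        Examples
        --------
        A minimal illustrative example::
            >>> t = Table([[1, 2, 3]], names=['a'])
            >>> t.add_index('a')
            >>> t.remove_indices('a')
            >>> len(t.indices)
            0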
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
        which one makes many additions or modifications to an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
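        Examples
        --------
        A minimal illustrative sketch of 'freeze' mode (the index is refreshed
        only when the context exits)::
            >>> t = Table([[1, 2, 3]], names=['a'])
            >>> t.add_index('a')
            >>> with t.index_mode('freeze'):
            ...     t['a'][0] = 10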
'''
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
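        Examples
        --------
        A minimal illustrative example::
            >>> t = Table([[1, 2], [3.0, 4.0]], names=('a', 'b'))
            >>> np.array(t).dtype.names
            ('a', 'b')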
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
out = self.as_array()
return out.data if isinstance(out, np.ma.MaskedArray) else out
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError(f'{inp_str} must be a list or None')
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns')
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of dictionaries representing rows."""
# Define placeholder for missing values as a unique object that cannot
        # ever occur in user data.
MISSING = object()
# Gather column names that exist in the input `data`.
names_from_data = set()
for row in data:
names_from_data.update(row)
if set(data[0].keys()) == names_from_data:
names_from_data = list(data[0].keys())
else:
names_from_data = sorted(names_from_data)
# Note: if set(data[0].keys()) != names_from_data, this will give an
# exception later, so NO need to catch here.
# Convert list of dict into dict of list (cols), keep track of missing
# indexes and put in MISSING placeholders in the `cols` lists.
cols = {}
missing_indexes = defaultdict(list)
for name in names_from_data:
cols[name] = []
for ii, row in enumerate(data):
try:
val = row[name]
except KeyError:
missing_indexes[name].append(ii)
val = MISSING
cols[name].append(val)
        # Fill the missing entries with the first non-missing value in that column
if missing_indexes:
for name, indexes in missing_indexes.items():
col = cols[name]
first_val = next(val for val in col if val is not MISSING)
for index in indexes:
col[index] = first_val
# prepare initialization
if all(name is None for name in names):
names = names_from_data
self._init_from_dict(cols, names, dtype, n_cols, copy)
# Mask the missing values if necessary, converting columns to MaskedColumn
# as needed.
if missing_indexes:
for name, indexes in missing_indexes.items():
col = self[name]
# Ensure that any Column subclasses with MISSING values can support
# setting masked values. As of astropy 4.0 the test condition below is
# always True since _init_from_dict cannot result in mixin columns.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
self[name] = self.MaskedColumn(col, copy=False)
# Finally do the masking in a mixin-safe way.
self[name][indexes] = np.ma.masked
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of column data. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
# Special case of initializing an empty table like `t = Table()`. No
# action required at this point.
if n_cols == 0:
return
cols = []
default_names = _auto_names(n_cols)
for col, name, default_name, dtype in zip(data, names, default_names, dtype):
col = self._convert_data_to_col(col, copy, default_name, dtype, name)
cols.append(col)
self._init_from_cols(cols)
def _convert_data_to_col(self, data, copy=True, default_name=None, dtype=None, name=None):
"""
        Convert any allowed sequence data ``data`` to a column object that can be used
directly in the self.columns dict. This could be a Column, MaskedColumn,
or mixin column.
The final column name is determined by::
            name or data.info.name or default_name
        If ``data`` has no ``info`` then ``name = name or default_name``.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
data : object (column-like sequence)
Input column data
copy : bool
Make a copy
default_name : str
Default name
dtype : np.dtype or None
Data dtype
name : str or None
Column name
Returns
-------
col : Column, MaskedColumn, mixin-column type
Object that can be used as a column in self
"""
data_is_mixin = self._is_mixin_for_table(data)
masked_col_cls = (self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn)
try:
data0_is_mixin = self._is_mixin_for_table(data[0])
except Exception:
# Need broad exception, cannot predict what data[0] raises for arbitrary data
data0_is_mixin = False
# If the data is not an instance of Column or a mixin class, we can
# check the registry of mixin 'handlers' to see if the column can be
# converted to a mixin class
if (handler := get_mixin_handler(data)) is not None:
original_data = data
data = handler(data)
if not (data_is_mixin := self._is_mixin_for_table(data)):
fully_qualified_name = (original_data.__class__.__module__ + '.'
+ original_data.__class__.__name__)
raise TypeError('Mixin handler for object of type '
f'{fully_qualified_name} '
'did not return a valid mixin column')
# Get the final column name using precedence. Some objects may not
# have an info attribute. Also avoid creating info as a side effect.
if not name:
if isinstance(data, Column):
name = data.name or default_name
elif 'info' in getattr(data, '__dict__', ()):
name = data.info.name or default_name
else:
name = default_name
if isinstance(data, Column):
# If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass,
# otherwise just use the original class. The most common case is a
# table with masked=True and ColumnClass=MaskedColumn. Then a Column
# gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior
# of downgrading from MaskedColumn to Column (for non-masked table)
# does not happen.
col_cls = self._get_col_cls_for_table(data)
elif data_is_mixin:
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
col = col_copy(data, copy_indices=self._init_indices) if copy else data
col.info.name = name
return col
elif data0_is_mixin:
# Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m].
try:
col = data[0].__class__(data)
col.info.name = name
return col
except Exception:
# If that didn't work for some reason, just turn it into np.array of object
data = np.array(data, dtype=object)
col_cls = self.ColumnClass
elif isinstance(data, (np.ma.MaskedArray, Masked)):
# Require that col_cls be a subclass of MaskedColumn, remembering
# that ColumnClass could be a user-defined subclass (though more-likely
# could be MaskedColumn).
col_cls = masked_col_cls
elif data is None:
# Special case for data passed as the None object (for broadcasting
# to an object column). Need to turn data into numpy `None` scalar
# object, otherwise `Column` interprets data=None as no data instead
# of a object column of `None`.
data = np.array(None)
col_cls = self.ColumnClass
elif not hasattr(data, 'dtype'):
# `data` is none of the above, convert to numpy array or MaskedArray
# assuming only that it is a scalar or sequence or N-d nested
# sequence. This function is relatively intricate and tries to
# maintain performance for common cases while handling things like
# list input with embedded np.ma.masked entries. If `data` is a
# scalar then it gets returned unchanged so the original object gets
# passed to `Column` later.
data = _convert_sequence_data_to_array(data, dtype)
copy = False # Already made a copy above
col_cls = masked_col_cls if isinstance(data, np.ma.MaskedArray) else self.ColumnClass
else:
col_cls = self.ColumnClass
try:
col = col_cls(name=name, data=data, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
except Exception:
# Broad exception class since we don't know what might go wrong
raise ValueError('unable to convert data to Column for Table')
col = self._convert_col_for_table(col)
return col
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
self._init_from_list(cols, names, dtype, n_cols, copy)
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _get_col_cls_for_table(self, col):
"""Get the correct column class to use for upgrading any Column-like object.
For a masked table, ensure any Column-like object is a subclass
of the table MaskedColumn.
For unmasked table, ensure any MaskedColumn-like object is a subclass
of the table MaskedColumn. If not a MaskedColumn, then ensure that any
Column-like object is a subclass of the table Column.
"""
col_cls = col.__class__
if self.masked:
if isinstance(col, Column) and not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
else:
if isinstance(col, MaskedColumn):
if not isinstance(col, self.MaskedColumn):
col_cls = self.MaskedColumn
elif isinstance(col, Column) and not isinstance(col, self.Column):
col_cls = self.Column
return col_cls
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct base class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col_cls = self._get_col_cls_for_table(col)
if col_cls is not col.__class__:
col = col_cls(col, copy=False)
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = {len(col) for col in cols}
if len(lengths) > 1:
raise ValueError(f'Inconsistent data column lengths: {lengths}')
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
if self.meta:
table.meta = self.meta.copy() # Shallow copy for slice
table.primary_key = self.primary_key
newcols = []
for col in self.columns.values():
newcol = col[slice_]
# Note in line below, use direct attribute access to col.indices for Column
# instances instead of the generic col.info.indices. This saves about 4 usec
# per column.
if (col if isinstance(col, Column) else col.info).indices:
# TODO : as far as I can tell the only purpose of setting _copy_indices
# here is to communicate that to the initial test in `slice_indices`.
# Why isn't that just sent as an arg to the function?
col.info._copy_indices = self._copy_indices
newcol = col.info.slice_indices(newcol, slice_, len(col))
# Don't understand why this is forcing a value on the original column.
# Normally col.info does not even have a _copy_indices attribute. Tests
# still pass if this line is deleted. (Each col.info attribute access
# is expensive).
col.info._copy_indices = True
newcols.append(newcol)
self._make_table_from_cols(table, newcols, verify=False, names=self.columns.keys())
return table
@staticmethod
def _make_table_from_cols(table, cols, verify=True, names=None):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
if names is None:
names = [col.info.name for col in cols]
# Note: we do not test for len(names) == len(cols) if names is not None. In that
        # case the function is being called from a "trusted" source (e.g. right above here)
# that is assumed to provide valid inputs. In that case verify=False.
if verify:
if None in names:
raise TypeError('Cannot have None for column name')
if len(set(names)) != len(names):
raise ValueError('Duplicate column names')
table.columns = table.TableColumns((name, col) for name, col in zip(names, cols))
for col in cols:
table._set_col_parent_table_and_mask(col)
def _set_col_parent_table_and_mask(self, col):
"""
Set ``col.parent_table = self`` and force ``col`` to have ``mask``
attribute if the table is masked and ``col.mask`` does not exist.
"""
# For Column instances it is much faster to do direct attribute access
# instead of going through .info
col_info = col if isinstance(col, Column) else col.info
col_info.parent_table = self
# Legacy behavior for masked table
if self.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append(f'length={len(self)}')
descr = ' '.join(descr_vals)
if html:
from astropy.utils.xml.writer import xml_escape
descr = f'<i>{xml_escape(descr)}</i>\n'
else:
descr = f'<{descr}>\n'
if tableid is None:
tableid = f'table{id(self)}'
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
out = self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
# Wrap <table> in <div>. This follows the pattern in pandas and allows
# table to be scrollable horizontally in VS Code notebook display.
out = f'<div>{out}</div>'
return out
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return '\n'.join(self.pformat())
def __bytes__(self):
return str(self).encode('utf-8')
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
@property
def has_masked_columns(self):
"""True if table has any ``MaskedColumn`` columns.
        This does not check for mixin columns that may have masked values; use the
``has_masked_values`` property in that case.
"""
return any(isinstance(col, MaskedColumn) for col in self.itercols())
@property
def has_masked_values(self):
"""True if column in the table has values which are masked.
This may be relatively slow for large tables as it requires checking the mask
values of each column.
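        Examples
        --------
        A minimal illustrative example::
            >>> t = Table([[1, 2]], names=['a'], masked=True)
            >>> t.has_masked_values
            False
            >>> t['a'][1] = np.ma.masked
            >>> t.has_masked_values
            True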
"""
for col in self.itercols():
if hasattr(col, 'mask') and np.any(col.mask):
return True
else:
return False
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not [Masked]Quantity (which gets converted to
# [Masked]Column with unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
@format_doc(_pprint_docs)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append(f'Length = {len(self)} rows')
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
@format_doc(_pprint_docs)
def pprint_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the entire table.
This method is the same as `astropy.table.Table.pprint` except that
the default ``max_lines`` and ``max_width`` are both -1 so that by
default the entire table is printed instead of restricting to the size
of the screen terminal.
"""
return self.pprint(max_lines, max_width, show_name,
show_unit, show_dtype, align)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or None
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or None
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <https://getbootstrap.com/css/#tables>`_
for the list of classes.
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
            Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = f'table{id(self)}-{np.random.randint(1, 1e6)}'
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.info.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or None
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or None
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : str
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from .jsviewer import DEFAULT_CSS
from urllib.parse import urljoin
from urllib.request import pathname2url
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error(f"Browser '{browser}' not found.")
else:
br.open(urljoin('file:', pathname2url(path)))
@format_doc(_pformat_docs, id="{id}")
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
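        Examples
        --------
        A minimal illustrative example (for a table without units the output is
        one name row, one separator row, and one line per table row)::
            >>> t = Table([[1, 2], [3, 4]], names=('a', 'b'))
            >>> lines = t.pformat()
            >>> len(lines)
            4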
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append(f'Length = {len(self)} rows')
return lines
@format_doc(_pformat_docs, id="{id}")
def pformat_all(self, max_lines=-1, max_width=-1, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the entire table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
"""
return self.pformat(max_lines, max_width, show_name,
show_unit, show_dtype, html, tableid,
align, tableclass)
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__([self[x] for x in item],
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
out.meta = self.meta.copy() # Shallow copy for meta
return out
elif ((isinstance(item, np.ndarray) and item.size == 0)
or (isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice)
or isinstance(item, np.ndarray)
or isinstance(item, list)
or isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError(f'Illegal type {type(item)} for table item access')
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
self.add_column(value, name=item, copy=True)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (isinstance(item, slice)
or isinstance(item, np.ndarray)
or isinstance(item, list)
or (isinstance(item, tuple) # output from np.where
and all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError(f'Illegal type {type(item)} for table item access')
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif (isinstance(item, (list, tuple, np.ndarray))
and all(isinstance(x, str) for x in item)):
self.remove_columns(item)
elif (isinstance(item, (list, np.ndarray))
and np.asarray(item).dtype.kind == 'i'):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError('illegal key or index value')
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
self._column_class = self.MaskedColumn if self._masked else self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (isinstance(names, (tuple, list)) and names
and all(isinstance(x, str) for x in names))
def keys(self):
return list(self.columns.keys())
def values(self):
return self.columns.values()
def items(self):
return self.columns.items()
def __len__(self):
# For performance reasons (esp. in Row) cache the first column name
        # and use that subsequently for the table length. It might not be
# available yet or the column might be gone now, in which case
# try again in the except block.
try:
return len(OrderedDict.__getitem__(self.columns, self._first_colname))
except (AttributeError, KeyError):
if len(self.columns) == 0:
return 0
# Get the first column name
self._first_colname = next(iter(self.columns))
return len(self.columns[self._first_colname])
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError(f"Column {name} does not exist")
def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True,
default_name=None):
"""
Add a new column to the table using ``col`` as input. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
The ``col`` input can be any data object which is acceptable as a
`~astropy.table.Table` column object or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
To add several columns at once use ``add_columns()`` or simply call
``add_column()`` for each one. There is very little performance difference
in the two approaches.
Parameters
----------
col : object
Data object for the new column
index : int or None
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
            Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
default_name : str or None
Name to use if both ``name`` and ``col.info.name`` are not available.
Defaults to ``col{number_of_columns}``.
Examples
--------
Create a table with two columns 'a' and 'b', then create a third column 'c'
and append it to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> t.add_column(['a', 'b'], name='d', index=1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(1.1, name='b', rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.1
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_column(['a', 'b'])
>>> t.add_column(col_c, name='d')
>>> print(t)
a b col2 d
--- --- ---- ---
1 0.1 a x
2 0.2 b y
"""
if default_name is None:
default_name = f'col{len(self.columns)}'
# Convert col data to acceptable object for insertion into self.columns.
# Note that along with the lines above and below, this allows broadcasting
# of scalars to the correct shape for adding to table.
col = self._convert_data_to_col(col, name=name, copy=copy,
default_name=default_name)
# Assigning a scalar column to an empty table should result in an
# exception (see #3811).
if col.shape == () and len(self) == 0:
raise TypeError('Empty table cannot have column set to scalar value')
# Make col data shape correct for scalars. The second test is to allow
# broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]].
elif (col.shape == () or col.shape[0] == 1) and len(self) > 0:
new_shape = (len(self),) + getattr(col, 'shape', ())[1:]
if isinstance(col, np.ndarray):
col = np.broadcast_to(col, shape=new_shape,
subok=True)
elif isinstance(col, ShapedLikeNDArray):
col = col._apply(np.broadcast_to, shape=new_shape,
subok=True)
# broadcast_to() results in a read-only array. Apparently it only changes
# the view to look like the broadcasted array. So copy.
col = col_copy(col)
name = col.info.name
# Ensure that new column is the right length
if len(self.columns) > 0 and len(col) != len(self):
raise ValueError('Inconsistent data column lengths')
if rename_duplicate:
orig_name = name
i = 1
while name in self.columns:
# Iterate until a unique name is found
name = orig_name + '_' + str(i)
i += 1
col.info.name = name
# Set col parent_table weakref and ensure col has mask attribute if table.masked
self._set_col_parent_table_and_mask(col)
# Add new column as last column
self.columns[name] = col
if index is not None:
# Move the other cols to the right of the new one
move_names = self.colnames[index:-1]
for move_name in move_names:
self.columns.move_to_end(move_name, last=True)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
        Add a list of new columns to the table using ``cols`` data objects. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
The ``cols`` input can include any data objects which are acceptable as
`~astropy.table.Table` column objects or can be converted. This includes
mixin columns and scalar or length=1 objects which get broadcast to match
the table length.
From a performance perspective there is little difference between calling
this method once or looping over the new columns and calling ``add_column()``
for each column.
Parameters
----------
cols : list of object
List of data objects for the new columns
indexes : list of int or None
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
See Also
--------
astropy.table.hstack, update, replace_column
Examples
--------
Create a table with two columns 'a' and 'b', then create columns 'c' and 'd'
and append them to the end of the table::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y'])
>>> col_d = Column(name='d', data=['u', 'v'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'],
... indexes=[0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'),
... rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([[1, 2], col_b])
>>> t.add_columns([[3, 4], col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
1 u 3 u
2 v 4 v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
default_names = [f'col{ii + len(self.columns)}'
for ii in range(len(cols))]
for ii in reversed(np.argsort(indexes)):
self.add_column(cols[ii], index=indexes[ii], name=names[ii],
default_name=default_names[ii],
rename_duplicate=rename_duplicate, copy=copy)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
refcount = None
old_col = None
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn(f"replaced column '{name}'",
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col, copy=True):
"""
Replace column ``name`` with the new ``col`` object.
The behavior of ``copy`` for Column objects is:
- copy=True: new class instance with a copy of data and deep copy of meta
- copy=False: new class instance with same data and a key-only copy of meta
For mixin columns:
- copy=True: new class instance with copy of data and deep copy of meta
- copy=False: original instance (no copy at all)
Parameters
----------
name : str
Name of column to replace
col : `~astropy.table.Column` or `~numpy.ndarray` or sequence
New column object to replace the existing column.
copy : bool
Make copy of the input ``col``, default=True
See Also
--------
add_columns, astropy.table.hstack, update
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError(f'column name {name} is not in the table')
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
col = self._convert_data_to_col(col, name=name, copy=copy)
self._set_col_parent_table_and_mask(col)
# Ensure that new column is the right length, unless it is the only column
# in which case re-sizing is allowed.
if len(self.columns) > 1 and len(col) != len(self[name]):
raise ValueError('length of new column must match table length')
self.columns.__setitem__(name, col, validated=True)
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice or int or array of int
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def iterrows(self, *names):
"""
Iterate over rows of table returning a tuple of values for each row.
This method is especially useful when only a subset of columns are needed.
The ``iterrows`` method can be substantially faster than using the standard
Table row iteration (e.g. ``for row in tbl:``), since that returns a new
``~astropy.table.Row`` object for each row and accessing a column in that
row (e.g. ``row['col0']``) is slower than tuple access.
Parameters
----------
names : list
List of column names (default to all columns if no names provided)
Returns
-------
rows : iterable
Iterator returns tuples of row values
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table({'a': [1, 2, 3],
... 'b': [1.0, 2.5, 3.0],
... 'c': ['x', 'y', 'z']})
To iterate row-wise using column names::
>>> for a, c in t.iterrows('a', 'c'):
... print(a, c)
1 x
2 y
3 z
"""
if len(names) == 0:
names = self.colnames
else:
for name in names:
if name not in self.colnames:
raise ValueError(f'{name} is not a valid column name')
cols = (self[name] for name in names)
out = zip(*cols)
return out
def _set_of_names_in_colnames(self, names):
"""Return ``names`` as a set if valid, or raise a `KeyError`.
``names`` is valid if all elements in it are in ``self.colnames``.
If ``names`` is a string then it is interpreted as a single column
name.
"""
names = {names} if isinstance(names, str) else set(names)
invalid_names = names.difference(self.colnames)
if len(invalid_names) == 1:
raise KeyError(f'column "{invalid_names.pop()}" does not exist')
elif len(invalid_names) > 1:
raise KeyError(f'columns {invalid_names} do not exist')
return names
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : str or iterable of str
Names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same as using remove_column.
'''
for name in self._set_of_names_in_colnames(names):
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
"""
for col in self.itercols():
if col.dtype.kind == in_kind:
try:
# This requires ASCII and is faster by a factor of up to ~8, so
# try that first.
newcol = col.__class__(col, dtype=out_kind)
except (UnicodeEncodeError, UnicodeDecodeError):
newcol = col.__class__(encode_decode_func(col, 'utf-8'))
# Quasi-manually copy info attributes. Unfortunately
# DataInfo.__set__ does not do the right thing in this case
# so newcol.info = col.info does not get the old info attributes.
for attr in col.info.attr_names - col.info._attrs_no_copy - {'dtype'}:
value = deepcopy(getattr(col.info, attr))
setattr(newcol.info, attr, value)
self[col.name] = newcol
def convert_bytestring_to_unicode(self):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U')
using UTF-8 encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
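        A minimal sketch of the effect on a small bytestring column (the
        example data are plain ASCII)::
            >>> t = Table([[b'a', b'bc']], names=['x'])
            >>> t['x'].dtype.kind
            'S'
            >>> t.convert_bytestring_to_unicode()
            >>> t['x'].dtype.kind
            'U'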
"""
self._convert_string_dtype('S', 'U', np.char.decode)
def convert_unicode_to_bytestring(self):
"""
Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S')
using UTF-8 encoding.
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings.
"""
self._convert_string_dtype('U', 'S', np.char.encode)
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : str or iterable of str
The columns to keep. All other columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
names = self._set_of_names_in_colnames(names)
for colname in self.colnames:
if colname not in names:
self.columns.pop(colname)
def rename_column(self, name, new_name):
'''
Rename a column.
        This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError(f"Column {name} does not exist")
self.columns[name].info.name = new_name
def rename_columns(self, names, new_names):
'''
Rename multiple columns.
Parameters
----------
names : list, tuple
A list or tuple of existing column names.
new_names : list, tuple
A list or tuple of new column names.
Examples
--------
Create a table with three columns 'a', 'b', 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming columns 'a' to 'aa' and 'b' to 'bb'::
>>> names = ('a','b')
>>> new_names = ('aa','bb')
>>> t.rename_columns(names, new_names)
>>> print(t)
aa bb c
--- --- ---
1 3 5
2 4 6
'''
if not self._is_list_or_tuple_of_str(names):
raise TypeError("input 'names' must be a tuple or a list of column names")
if not self._is_list_or_tuple_of_str(new_names):
raise TypeError("input 'new_names' must be a tuple or a list of column names")
if len(names) != len(new_names):
raise ValueError("input 'names' and 'new_names' list arguments must be the same length")
for name, new_name in zip(names, new_names):
self.rename_column(name, new_name)
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError('right hand side must be a sequence of values with '
'the same length as the number of selected columns')
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
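        Values can also be supplied as a mapping keyed by column name; as
        noted above, any unspecified columns are filled with a zero of the
        column dtype. A minimal sketch continuing the example::
            >>> t.add_row({'a': 4, 'b': 7, 'c': 10})
            >>> len(t)
            4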
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or None
Use the specified values in the new row
mask : tuple, list, dict or None
Use the specified mask values in the new row
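        Examples
        --------
        A minimal sketch inserting a row at the start of a small table::
            >>> t = Table([[1, 2], [4, 5]], names=('a', 'b'))
            >>> t.insert_row(0, [0, 3])
            >>> print(t['a'][0], t['b'][0])
            0 3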
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {} is out of bounds for table with length {}"
.format(index, N))
if index < 0:
index += N
if isinstance(vals, Mapping) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not isinstance(mask, Mapping):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError(f"Value must be supplied for column '{name}'")
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
# Insert val at index for each column
columns = self.TableColumns()
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
try:
# If new val is masked and the existing column does not support masking
# then upgrade the column to a mask-enabled type: either the table-level
# default ColumnClass or else MaskedColumn.
if mask_ and isinstance(col, Column) and not isinstance(col, MaskedColumn):
col_cls = (self.ColumnClass
if issubclass(self.ColumnClass, self.MaskedColumn)
else self.MaskedColumn)
col = col_cls(col, copy=False)
newcol = col.insert(index, val, axis=0)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {} after inserting {}'
' (expected {}, got {})'
                                     .format(name, val, N + 1, len(newcol)))
newcol.info.parent_table = self
# Set mask if needed and possible
if mask_:
if hasattr(newcol, 'mask'):
newcol[index] = np.ma.masked
else:
raise TypeError("mask was supplied for column '{}' but it does not "
"support masked values".format(col.info.name))
columns[name] = newcol
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{}':\n{}"
.format(name, err)) from err
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def update(self, other, copy=True):
"""
Perform a dictionary-style update and merge metadata.
The argument ``other`` must be a |Table|, or something that can be used
to initialize a table. Columns from (possibly converted) ``other`` are
added to this table. In case of matching column names the column from
this table is replaced with the one from ``other``.
Parameters
----------
other : table-like
Data to update this table with.
copy : bool
Whether the updated columns should be copies of or references to
the originals.
See Also
--------
add_columns, astropy.table.hstack, replace_column
Examples
--------
Update a table with another table::
>>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0})
>>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2})
>>> t1.update(t2)
>>> t1
<Table length=2>
a b c
str3 float64 float64
---- ------- -------
foo 1.0 7.0
bar 2.0 11.0
>>> t1.meta
{'i': 0, 'n': 2}
Update a table with a dictionary::
>>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]})
>>> t.update({'b': [1., 2.]})
>>> t
<Table length=2>
a b
str3 float64
---- -------
foo 1.0
bar 2.0
"""
from .operations import _merge_table_meta
if not isinstance(other, Table):
other = self.__class__(other, copy=copy)
common_cols = set(self.colnames).intersection(other.colnames)
for name, col in other.items():
if name in common_cols:
self.replace_column(name, col, copy=copy)
else:
self.add_column(col, name=name, copy=copy)
_merge_table_meta(self, [self, other], metadata_conflicts='silent')
def argsort(self, keys=None, kind=None, reverse=False):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
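        Examples
        --------
        A minimal sketch with a single key column whose values make the
        expected order unambiguous::
            >>> t = Table({'a': [3, 1, 2]})
            >>> t.argsort('a').tolist()
            [1, 2, 0]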
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, names=keys)
if index is not None:
idx = np.asarray(index.sorted_data())
return idx[::-1] if reverse else idx
kwargs = {}
if keys:
# For multiple keys return a structured array which gets sorted,
# while for a single key return a single ndarray. Sorting a
# one-column structured array is slower than ndarray (e.g. a
# factor of ~6 for a 10 million long random array), and much slower
# for in principle sortable columns like Time, which get stored as
# object arrays.
if len(keys) > 1:
kwargs['order'] = keys
data = self.as_array(names=keys)
else:
data = self[keys[0]]
else:
# No keys provided so sort on all columns.
data = self.as_array()
if kind:
kwargs['kind'] = kind
# np.argsort will look for a possible .argsort method (e.g., for Time),
# and if that fails cast to an array and try sorting that way.
idx = np.argsort(data, **kwargs)
return idx[::-1] if reverse else idx
def sort(self, keys=None, *, kind=None, reverse=False):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm used by ``numpy.argsort``.
reverse : bool
Sort in reverse order (default=False)
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'],
... [12, 15, 18]], names=('firstname', 'name', 'tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name', 'firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
Sorting according to standard sorting rules, first 'firstname' then 'tel',
in reverse order::
>>> t.sort(['firstname', 'tel'], reverse=True)
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
John Jackson 18
Jo Miller 15
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys, kind=kind, reverse=reverse)
with self.index_mode('freeze'):
for name, col in self.columns.items():
# Make a new sorted column. This requires that take() also copies
# relevant info attributes for mixin columns.
new_col = col.take(indexes, axis=0)
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9553 and #9536 for discussion.
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
# First statement in try: will succeed if the column supports an in-place
# update, and matches the legacy behavior of astropy Table. However,
# some mixin classes may not support this, so in that case just drop
# in the entire new column. See #9836, #9553, and #9536 for discussion.
new_col = col[::-1]
try:
col[:] = new_col
except Exception:
# In-place update failed for some reason, exception class not
# predictable for arbitrary mixin.
self[col.info.name] = new_col
for index in self.indices:
index.reverse()
def round(self, decimals=0):
'''
Round numeric columns in-place to the specified number of decimals.
Non-numeric columns will be ignored.
Examples
--------
Create three columns with different types:
>>> t = Table([[1, 4, 5], [-25.55, 12.123, 85],
... ['a', 'b', 'c']], names=('a', 'b', 'c'))
>>> print(t)
a b c
--- ------ ---
1 -25.55 a
4 12.123 b
5 85.0 c
Round them all to 0:
>>> t.round(0)
>>> print(t)
a b c
--- ----- ---
1 -26.0 a
4 12.0 b
5 85.0 c
Round column 'a' to -1 decimal:
>>> t.round({'a':-1})
>>> print(t)
a b c
--- ----- ---
0 -26.0 a
0 12.0 b
0 85.0 c
Parameters
----------
        decimals : int or dict
Number of decimals to round the columns to. If a dict is given,
the columns will be rounded to the number specified as the value.
If a certain column is not in the dict given, it will remain the
same.
'''
if isinstance(decimals, Mapping):
decimal_values = decimals.values()
column_names = decimals.keys()
elif isinstance(decimals, int):
decimal_values = itertools.repeat(decimals)
column_names = self.colnames
else:
raise ValueError("'decimals' argument must be an int or a dict")
for colname, decimal in zip(column_names, decimal_values):
col = self.columns[colname]
if np.issubdtype(col.info.dtype, np.number):
try:
np.around(col, decimals=decimal, out=col)
except TypeError:
# Bug in numpy see https://github.com/numpy/numpy/issues/15438
col[()] = np.around(col, decimals=decimal)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
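        Examples
        --------
        A minimal sketch showing that the default copy is independent of the
        original data::
            >>> t = Table({'a': [1, 2]})
            >>> t2 = t.copy()
            >>> t2['a'][0] = 99
            >>> print(t['a'][0])
            1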
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
return self._rows_equal(other)
def __ne__(self, other):
return ~self.__eq__(other)
def _rows_equal(self, other):
"""
Row-wise comparison of table with any other object.
        This is the actual implementation for ``__eq__``.
Returns a 1-D boolean numpy array showing result of row-wise comparison.
This is the same as the ``==`` comparison for tables.
Parameters
----------
other : Table or DataFrame or ndarray
An object to compare with table
Examples
--------
        Comparing one Table with another::
>>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> t1._rows_equal(t2)
array([ True, True])
"""
if isinstance(other, Table):
other = other.as_array()
if self.has_masked_columns:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def values_equal(self, other):
"""
Element-wise comparison of table with another table, list, or scalar.
Returns a ``Table`` with the same columns containing boolean values
showing result of comparison.
Parameters
----------
other : table-like object or list or scalar
Object to compare with table
Examples
--------
        Compare one Table with another::
>>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c'))
>>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c'))
>>> t1.values_equal(t2)
<Table length=2>
a b c
bool bool bool
---- ----- -----
True False False
True True True
"""
if isinstance(other, Table):
names = other.colnames
else:
try:
other = Table(other, copy=False)
names = other.colnames
except Exception:
# Broadcast other into a dict, so e.g. other = 2 will turn into
# other = {'a': 2, 'b': 2} and then equality does a
# column-by-column broadcasting.
names = self.colnames
other = {name: other for name in names}
# Require column names match but do not require same column order
if set(self.colnames) != set(names):
raise ValueError('cannot compare tables with different column names')
eqs = []
for name in names:
try:
np.broadcast(self[name], other[name]) # Check if broadcast-able
# Catch the numpy FutureWarning related to equality checking,
# "elementwise comparison failed; returning scalar instead, but
# in the future will perform elementwise comparison". Turn this
# into an exception since the scalar answer is not what we want.
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
eq = self[name] == other[name]
if (warns and issubclass(warns[-1].category, FutureWarning)
and 'elementwise comparison failed' in str(warns[-1].message)):
raise FutureWarning(warns[-1].message)
except Exception as err:
raise ValueError(f'unable to compare column {name}') from err
# Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just
# broken and completely ignores that it should return an array.
if not (isinstance(eq, np.ndarray)
and eq.dtype is np.dtype('bool')
and len(eq) == len(self)):
raise TypeError(f'comparison for column {name} returned {eq} '
f'instead of the expected boolean ndarray')
eqs.append(eq)
out = Table(eqs, names=names)
return out
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to unique
values of the ``keys`` grouping object. The output is a new
`~astropy.table.TableGroups` which contains a copy of this table but
sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `~astropy.table.Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `~astropy.table.Table`
Key grouping object
Returns
-------
out : `~astropy.table.Table`
New table with groups set
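        Examples
        --------
        A minimal sketch grouping a small table by a single key column::
            >>> t = Table({'key': ['a', 'b', 'a'], 'val': [1, 2, 3]})
            >>> tg = t.group_by('key')
            >>> len(tg.groups)
            2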
"""
return groups.table_group_by(self, keys)
def to_pandas(self, index=None, use_nullable_int=True):
"""
Return a :class:`pandas.DataFrame` instance
The index of the created DataFrame is controlled by the ``index``
argument. For ``index=True`` or the default ``None``, an index will be
specified for the DataFrame if there is a primary key index on the
Table *and* if it corresponds to a single column. If ``index=False``
then no DataFrame index will be specified. If ``index`` is the name of
a column in the table then that will be the DataFrame index.
In addition to vanilla columns or masked columns, this supports Table
mixin columns like Quantity, Time, or SkyCoord. In many cases these
        objects have no analog in pandas and will be converted to an "encoded"
representation using only Column or MaskedColumn. The exception is
Time or TimeDelta columns, which will be converted to the corresponding
representation in pandas using ``np.datetime64`` or ``np.timedelta64``.
See the example below.
Parameters
----------
index : None, bool, str
Specify DataFrame index mode
use_nullable_int : bool, default=True
Convert integer MaskedColumn to pandas nullable integer type.
If ``use_nullable_int=False`` or the pandas version does not support
nullable integer types (version < 0.24), then the column is converted
to float with NaN for missing elements and a warning is issued.
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table has multi-dimensional columns
Examples
--------
Here we convert a table with a few mixins to a
:class:`pandas.DataFrame` instance.
>>> import pandas as pd
>>> from astropy.table import QTable
>>> import astropy.units as u
>>> from astropy.time import Time, TimeDelta
>>> from astropy.coordinates import SkyCoord
>>> q = [1, 2] * u.m
>>> tm = Time([1998, 2002], format='jyear')
>>> sc = SkyCoord([5, 6], [7, 8], unit='deg')
>>> dt = TimeDelta([3, 200] * u.s)
>>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt'])
>>> df = t.to_pandas(index='tm')
>>> with pd.option_context('display.max_columns', 20):
... print(df)
q sc.ra sc.dec dt
tm
1998-01-01 1.0 5.0 7.0 0 days 00:00:03
2002-01-01 2.0 6.0 8.0 0 days 00:03:20
"""
from pandas import DataFrame, Series
if index is not False:
if index in (None, True):
# Default is to use the table primary key if available and a single column
if self.primary_key and len(self.primary_key) == 1:
index = self.primary_key[0]
else:
index = False
else:
if index not in self.colnames:
raise ValueError('index must be None, False, True or a table '
'column name')
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from . import serialize
from astropy.time import TimeBase, TimeDelta
# Convert any Time or TimeDelta columns and pay attention to masking
time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)]
if time_cols:
# Make a light copy of table and clear any indices
new_cols = []
for col in tbl.itercols():
new_col = col_copy(col, copy_indices=False) if col.info.indices else col
new_cols.append(new_col)
tbl = tbl.__class__(new_cols, copy=False)
# Certain subclasses (e.g. TimeSeries) may generate new indices on
# table creation, so make sure there are no indices on the table.
for col in tbl.itercols():
col.info.indices.clear()
for col in time_cols:
if isinstance(col, TimeDelta):
# Convert to nanoseconds (matches astropy datetime64 support)
new_col = (col.sec * 1e9).astype('timedelta64[ns]')
nat = np.timedelta64('NaT')
else:
new_col = col.datetime64.copy()
nat = np.datetime64('NaT')
if col.masked:
new_col[col.mask] = nat
tbl[col.info.name] = new_col
# Convert the table to one with no mixins, only Column objects.
encode_tbl = serialize.represent_mixins_as_columns(tbl)
return encode_tbl
tbl = _encode_mixins(self)
badcols = [name for name, col in self.columns.items() if len(col.shape) > 1]
if badcols:
raise ValueError(
f'Cannot convert a table with multidimensional columns to a '
f'pandas DataFrame. Offending columns are: {badcols}\n'
f'One can filter out such columns using:\n'
f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n'
f'tbl[names].to_pandas(...)')
out = OrderedDict()
for name, column in tbl.columns.items():
if getattr(column.dtype, 'isnative', True):
out[name] = column
else:
out[name] = column.data.byteswap().newbyteorder('=')
if isinstance(column, MaskedColumn) and np.any(column.mask):
if column.dtype.kind in ['i', 'u']:
pd_dtype = column.dtype.name
if use_nullable_int:
# Convert int64 to Int64, uint32 to UInt32, etc for nullable types
pd_dtype = pd_dtype.replace('i', 'I').replace('u', 'U')
out[name] = Series(out[name], dtype=pd_dtype)
# If pandas is older than 0.24 the type may have turned to float
if column.dtype.kind != out[name].dtype.kind:
warnings.warn(
f"converted column '{name}' from {column.dtype} to {out[name].dtype}",
TableReplaceWarning, stacklevel=3)
elif column.dtype.kind not in ['f', 'c']:
out[name] = column.astype(object).filled(np.nan)
kwargs = {}
if index:
idx = out.pop(index)
kwargs['index'] = idx
# We add the table index to Series inputs (MaskedColumn with int values) to override
# its default RangeIndex, see #11432
for v in out.values():
if isinstance(v, Series):
v.index = idx
df = DataFrame(out, **kwargs)
if index:
# Explicitly set the pandas DataFrame index to the original table
# index name.
df.index.name = idx.info.name
return df
@classmethod
def from_pandas(cls, dataframe, index=False, units=None):
"""
Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance
In addition to converting generic numeric or string columns, this supports
conversion of pandas Date and Time delta columns to `~astropy.time.Time`
and `~astropy.time.TimeDelta` columns, respectively.
Parameters
----------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
index : bool
Include the index column in the returned table (default=False)
        units : dict
            A dict mapping column names to a `~astropy.units.Unit`.
The columns will have the specified unit in the Table.
Returns
-------
table : `~astropy.table.Table`
A `~astropy.table.Table` (or subclass) instance
Raises
------
ImportError
If pandas is not installed
Examples
--------
Here we convert a :class:`pandas.DataFrame` instance
to a `~astropy.table.QTable`.
>>> import numpy as np
>>> import pandas as pd
>>> from astropy.table import QTable
>>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]')
>>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]'))
>>> df = pd.DataFrame({'time': time})
>>> df['dt'] = dt
>>> df['x'] = [3., 4.]
>>> with pd.option_context('display.max_columns', 20):
... print(df)
time dt x
0 1998-01-01 0 days 00:00:01 3.0
1 2002-01-01 0 days 00:05:00 4.0
>>> QTable.from_pandas(df)
<QTable length=2>
time dt x
Time TimeDelta float64
----------------------- --------- -------
1998-01-01T00:00:00.000 1.0 3.0
2002-01-01T00:00:00.000 300.0 4.0
"""
out = OrderedDict()
names = list(dataframe.columns)
columns = [dataframe[name] for name in names]
datas = [np.array(column) for column in columns]
masks = [np.array(column.isnull()) for column in columns]
if index:
index_name = dataframe.index.name or 'index'
while index_name in names:
index_name = '_' + index_name + '_'
names.insert(0, index_name)
columns.insert(0, dataframe.index)
datas.insert(0, np.array(dataframe.index))
masks.insert(0, np.zeros(len(dataframe), dtype=bool))
if units is None:
units = [None] * len(names)
else:
if not isinstance(units, Mapping):
raise TypeError('Expected a Mapping "column-name" -> "unit"')
not_found = set(units.keys()) - set(names)
if not_found:
warnings.warn(f'`units` contains additional columns: {not_found}')
units = [units.get(name) for name in names]
for name, column, data, mask, unit in zip(names, columns, datas, masks, units):
if column.dtype.kind in ['u', 'i'] and np.any(mask):
# Special-case support for pandas nullable int
np_dtype = str(column.dtype).lower()
data = np.zeros(shape=column.shape, dtype=np_dtype)
data[~mask] = column[~mask]
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit, copy=False)
continue
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
# Numpy datetime64
if data.dtype.kind == 'M':
from astropy.time import Time
out[name] = Time(data, format='datetime64')
if np.any(mask):
out[name][mask] = np.ma.masked
out[name].format = 'isot'
# Numpy timedelta64
elif data.dtype.kind == 'm':
from astropy.time import TimeDelta
data_sec = data.astype('timedelta64[ns]').astype(np.float64) / 1e9
out[name] = TimeDelta(data_sec, format='sec')
if np.any(mask):
out[name][mask] = np.ma.masked
else:
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit)
else:
out[name] = Column(data=data, name=name, unit=unit)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`~astropy.table.QTable` provides a class for heterogeneous tabular data
which can be easily modified, for instance adding columns or new rows.
The `~astropy.table.QTable` class is identical to `~astropy.table.Table`
except that columns with an associated ``unit`` attribute are converted to
`~astropy.units.Quantity` objects.
See also:
- https://docs.astropy.org/en/stable/table/
- https://docs.astropy.org/en/stable/table/mixin_columns.html
Parameters
----------
data : numpy ndarray, dict, list, table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of list, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
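    Examples
    --------
    A minimal sketch; a column assigned from a `~astropy.units.Quantity`
    stays a Quantity in a `~astropy.table.QTable`::
        >>> import astropy.units as u
        >>> from astropy.table import QTable
        >>> t = QTable()
        >>> t['v'] = [1., 2.] * u.m / u.s
        >>> type(t['v']).__name__
        'Quantity'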
"""
def _is_mixin_for_table(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if isinstance(col, Column) and getattr(col, 'unit', None) is not None:
# We need to turn the column into a quantity; use subok=True to allow
# Quantity subclasses identified in the unit (such as u.mag()).
q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity
try:
qcol = q_cls(col.data, col.unit, copy=False, subok=True)
except Exception as exc:
warnings.warn(f"column {col.info.name} has a unit but is kept as "
f"a {col.__class__.__name__} as an attempt to "
f"convert it to Quantity failed with:\n{exc!r}",
AstropyUserWarning)
else:
qcol.info = col.info
qcol.info.indices = col.info.indices
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import warnings
import weakref
from copy import deepcopy
import numpy as np
from numpy import ma
from astropy.units import Unit, Quantity, StructuredUnit
from astropy.utils.console import color_print
from astropy.utils.metadata import MetaData
from astropy.utils.data_info import BaseColumnInfo, dtype_info_name
from astropy.utils.misc import dtype_bytes_or_chars
from . import groups
from . import pprint
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
class StringTruncateWarning(UserWarning):
"""
Warning class for when a string column is assigned a value
that gets truncated because the base (numpy) string length
is too short.
This does not inherit from AstropyWarning because we want to use
stacklevel=2 to show the user where the issue occurred in their code.
"""
pass
# Always emit this warning, not just the first instance
warnings.simplefilter('always', StringTruncateWarning)
def _auto_names(n_cols):
from . import conf
return [str(conf.auto_colname).format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = {
np.greater, np.greater_equal, np.less, np.less_equal,
np.not_equal, np.equal,
np.isfinite, np.isinf, np.isnan, np.sign, np.signbit}
def col_copy(col, copy_indices=True):
"""
Mixin-safe version of Column.copy() (with copy_data=True).
Parameters
----------
col : Column or mixin column
Input column
copy_indices : bool
Copy the column ``indices`` attribute
Returns
-------
col : Copy of input column
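    Examples
    --------
    A minimal sketch copying a plain Column; the same call also works for
    mixin columns::
        >>> from astropy.table import Column
        >>> c = Column([1, 2], name='a')
        >>> c2 = col_copy(c)
        >>> c2 is c
        False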
"""
if isinstance(col, BaseColumn):
return col.copy()
newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col)
# If the column has info defined, we copy it and adjust any indices
# to point to the copied column. By guarding with the if statement,
# we avoid side effects (of creating the default info instance).
if 'info' in col.__dict__:
newcol.info = col.info
if copy_indices and col.info.indices:
newcol.info.indices = deepcopy(col.info.indices)
for index in newcol.info.indices:
index.replace_col(col, newcol)
return newcol
class FalseArray(np.ndarray):
"""
Boolean mask array that is always False.
This is used to create a stub ``mask`` property which is a boolean array of
``False`` used by default for mixin columns and corresponding to the mixin
column data shape. The ``mask`` looks like a normal numpy array but an
exception will be raised if ``True`` is assigned to any element. The
consequences of the limitation are most obvious in the high-level table
operations.
Parameters
----------
shape : tuple
Data shape
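    Examples
    --------
    A minimal sketch; assigning ``False`` is accepted (and is a no-op, since
    the array is all-``False`` by construction), while assigning ``True``
    raises a `ValueError`::
        >>> mask = FalseArray((2,))
        >>> mask[0] = False
        >>> bool(mask.any())
        False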
"""
def __new__(cls, shape):
obj = np.zeros(shape, dtype=bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError('Cannot set any element of {} class to True'
.format(self.__class__.__name__))
def _expand_string_array_for_values(arr, values):
"""
For string-dtype return a version of ``arr`` that is wide enough for ``values``.
If ``arr`` is not string-dtype or does not need expansion then return ``arr``.
Parameters
----------
arr : np.ndarray
Input array
values : scalar or array-like
Values for width comparison for string arrays
Returns
-------
arr_expanded : np.ndarray
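    Examples
    --------
    A small sketch of the widening behavior for a unicode array (numpy uses
    4 bytes per character, so a width-6 result has an itemsize of 24)::
        >>> arr = np.array(['ab', 'cd'])
        >>> _expand_string_array_for_values(arr, ['longer']).dtype.itemsize
        24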
"""
if arr.dtype.kind in ('U', 'S') and values is not np.ma.masked:
# Find the length of the longest string in the new values.
values_str_len = np.char.str_len(values).max()
# Determine character repeat count of arr.dtype. Returns a positive
# int or None (something like 'U0' is not possible in numpy). If new values
# are longer than current then make a new (wider) version of arr.
arr_str_len = dtype_bytes_or_chars(arr.dtype)
if arr_str_len and values_str_len > arr_str_len:
arr_dtype = arr.dtype.byteorder + arr.dtype.kind + str(values_str_len)
arr = arr.astype(arr_dtype)
return arr
def _convert_sequence_data_to_array(data, dtype=None):
"""Convert N-d sequence-like data to ndarray or MaskedArray.
This is the core function for converting Python lists or list of lists to a
numpy array. This handles embedded np.ma.masked constants in ``data`` along
with the special case of an homogeneous list of MaskedArray elements.
Considerations:
- np.ma.array is about 50 times slower than np.array for list input. This
function avoids using np.ma.array on list input.
- np.array emits a UserWarning for embedded np.ma.masked, but only for int
or float inputs. For those it converts to np.nan and forces float dtype.
For other types np.array is inconsistent, for instance converting
np.ma.masked to "0.0" for str types.
- Searching in pure Python for np.ma.masked in ``data`` is comparable in
speed to calling ``np.array(data)``.
- This function may end up making two additional copies of input ``data``.
Parameters
----------
data : N-d sequence
Input data, typically list or list of lists
dtype : None or dtype-like
Output datatype (None lets np.array choose)
Returns
-------
np_data : np.ndarray or np.ma.MaskedArray
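    Examples
    --------
    A minimal sketch showing how an embedded ``np.ma.masked`` element turns
    the result into a MaskedArray::
        >>> out = _convert_sequence_data_to_array([1, np.ma.masked, 3])
        >>> out.mask.tolist()
        [False, True, False]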
"""
np_ma_masked = np.ma.masked # Avoid repeated lookups of this object
# Special case of an homogeneous list of MaskedArray elements (see #8977).
# np.ma.masked is an instance of MaskedArray, so exclude those values.
if (hasattr(data, '__len__')
and len(data) > 0
and all(isinstance(val, np.ma.MaskedArray)
and val is not np_ma_masked for val in data)):
np_data = np.ma.array(data, dtype=dtype)
return np_data
# First convert data to a plain ndarray. If there are instances of np.ma.masked
# in the data this will issue a warning for int and float.
with warnings.catch_warnings(record=True) as warns:
# Ensure this warning from numpy is always enabled and that it is not
# converted to an error (which can happen during pytest).
warnings.filterwarnings('always', category=UserWarning,
message='.*converting a masked element.*')
# FutureWarning in numpy 1.21. See https://github.com/astropy/astropy/issues/11291
# and https://github.com/numpy/numpy/issues/18425.
warnings.filterwarnings('always', category=FutureWarning,
message='.*Promotion of numbers and bools to strings.*')
try:
np_data = np.array(data, dtype=dtype)
except np.ma.MaskError:
# Catches case of dtype=int with masked values, instead let it
# convert to float
np_data = np.array(data)
except Exception:
# Conversion failed for some reason, e.g. [2, 1*u.m] gives TypeError in Quantity.
# First try to interpret the data as Quantity. If that still fails then fall
# through to object
try:
np_data = Quantity(data, dtype)
except Exception:
dtype = object
np_data = np.array(data, dtype=dtype)
if np_data.ndim == 0 or (np_data.ndim > 0 and len(np_data) == 0):
# Implies input was a scalar or an empty list (e.g. initializing an
# empty table with pre-declared names and dtypes but no data). Here we
# need to fall through to initializing with the original data=[].
return data
# If there were no warnings and the data are int or float, then we are done.
# Other dtypes like string or complex can have masked values and the
# np.array() conversion gives the wrong answer (e.g. converting np.ma.masked
# to the string "0.0").
if len(warns) == 0 and np_data.dtype.kind in ('i', 'f'):
return np_data
# Now we need to determine if there is an np.ma.masked anywhere in input data.
# Make a statement like below to look for np.ma.masked in a nested sequence.
# Because np.array(data) succeeded we know that `data` has a regular N-d
# structure. Find ma_masked:
# any(any(any(d2 is ma_masked for d2 in d1) for d1 in d0) for d0 in data)
# Using this eval avoids creating a copy of `data` in the more-usual case of
# no masked elements.
any_statement = 'd0 is ma_masked'
for ii in reversed(range(np_data.ndim)):
if ii == 0:
any_statement = f'any({any_statement} for d0 in data)'
elif ii == np_data.ndim - 1:
any_statement = f'any(d{ii} is ma_masked for d{ii} in d{ii-1})'
else:
any_statement = f'any({any_statement} for d{ii} in d{ii-1})'
context = {'ma_masked': np.ma.masked, 'data': data}
has_masked = eval(any_statement, context)
# If there are any masks then explicitly change each one to a fill value and
# set a mask boolean array. If not has_masked then we're done.
if has_masked:
mask = np.zeros(np_data.shape, dtype=bool)
data_filled = np.array(data, dtype=object)
# Make type-appropriate fill value based on initial conversion.
if np_data.dtype.kind == 'U':
fill = ''
elif np_data.dtype.kind == 'S':
fill = b''
else:
# Zero works for every numeric type.
fill = 0
ranges = [range(dim) for dim in np_data.shape]
for idxs in itertools.product(*ranges):
val = data_filled[idxs]
if val is np_ma_masked:
data_filled[idxs] = fill
mask[idxs] = True
elif isinstance(val, bool) and dtype is None:
# If we see a bool and dtype not specified then assume bool for
# the entire array. Not perfect but in most practical cases OK.
# Unfortunately numpy types [False, 0] as int, not bool (and
# [False, np.ma.masked] => array([0.0, np.nan])).
dtype = bool
# If no dtype is provided then need to convert back to list so np.array
# does type autodetection.
if dtype is None:
data_filled = data_filled.tolist()
# Use np.array first to convert `data` to ndarray (fast) and then make
# masked array from an ndarray with mask (fast) instead of from `data`.
np_data = np.ma.array(np.array(data_filled, dtype=dtype), mask=mask)
return np_data
def _make_compare(oper):
"""
Make Column comparison methods which encode the ``other`` object to utf-8
in the case of a bytestring dtype for Py3+.
Parameters
----------
oper : str
Operator name
"""
def _compare(self, other):
op = oper # copy enclosed ref to allow swap below
# If other is a Quantity, we should let it do the work, since
# it can deal with our possible unit (which, for MaskedColumn,
# would get dropped below, as '.data' is accessed in super()).
if isinstance(other, Quantity):
return NotImplemented
# If we are unicode and other is a column with bytes, defer to it for
# doing the unicode sandwich. This avoids problems like those
# discussed in #6838 and #6899.
if (self.dtype.kind == 'U'
and isinstance(other, Column) and other.dtype.kind == 'S'):
return NotImplemented
# If we are bytes, encode other as needed.
if self.dtype.char == 'S':
other = self._encode_str(other)
# Now just let the regular ndarray.__eq__, etc., take over.
result = getattr(super(Column, self), op)(other)
# But we should not return Column instances for this case.
return result.data if isinstance(result, Column) else result
return _compare
class ColumnInfo(BaseColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information.
"""
attr_names = BaseColumnInfo.attr_names | {'groups'}
_attrs_no_copy = BaseColumnInfo._attrs_no_copy | {'groups'}
attrs_from_parent = attr_names
_supports_indexing = True
# For structured columns, data is used to store a dict of columns.
# Store entries in that dict as name.key instead of name.data.key.
_represent_as_dict_primary_data = 'data'
def _represent_as_dict(self):
result = super()._represent_as_dict()
names = self._parent.dtype.names
# For a regular column, we are done, but for a structured
# column, we use a SerializedColumns to store the pieces.
if names is None:
return result
from .serialize import SerializedColumn
data = SerializedColumn()
# If this column has a StructuredUnit, we split it and store
# it on the corresponding part. Otherwise, we just store it
# as an attribute below. All other attributes we remove from
# the parts, so that we do not store them multiple times.
# (Note that attributes are not linked to the parent, so it
# is safe to reset them.)
# TODO: deal with (some of) this in Column.__getitem__?
# Alternatively: should we store info on the first part?
# TODO: special-case format somehow? Can we have good formats
# for structured columns?
unit = self.unit
if isinstance(unit, StructuredUnit) and len(unit) == len(names):
units = unit.values()
unit = None # No need to store as an attribute as well.
else:
units = [None] * len(names)
for name, part_unit in zip(names, units):
part = self._parent[name]
part.unit = part_unit
part.description = None
part.meta = {}
part.format = None
data[name] = part
# Create the attributes required to reconstruct the column.
result['data'] = data
# Store the shape if needed. Just like scalar data, a structured data
# column (e.g. with dtype `f8,i8`) can be multidimensional within each
# row and have a shape, and that needs to be distinguished from the
# case that each entry in the structure has the same shape (e.g.,
        # distinguish a column with dtype='f8,i8' and 2 elements per row from
# one with dtype '2f8,2i8' and just one element per row).
if shape := self._parent.shape[1:]:
result['shape'] = list(shape)
# Also store the standard info attributes since these are
# stored on the parent and can thus just be passed on as
# arguments. TODO: factor out with essentially the same
# code in serialize._represent_mixin_as_column.
if unit is not None and unit != '':
result['unit'] = unit
if self.format is not None:
result['format'] = self.format
if self.description is not None:
result['description'] = self.description
if self.meta:
result['meta'] = self.meta
return result
def _construct_from_dict(self, map):
if not isinstance(map.get('data'), dict):
return super()._construct_from_dict(map)
# Reconstruct a structured Column, by first making an empty column
# and then filling it with the structured data.
data = map.pop('data')
shape = tuple(map.pop('shape', ()))
# There are three elements in the shape of `part`:
# (table length, shape of structured column, shape of part like '3f8')
# The column `shape` only includes the second, so by adding one to its
# length to include the table length, we pick off a possible last bit.
dtype = np.dtype([(name, part.dtype, part.shape[len(shape)+1:])
for name, part in data.items()])
units = tuple(col.info.unit for col in data.values())
if all(unit is not None for unit in units):
map['unit'] = StructuredUnit(units, dtype)
map.update(dtype=dtype, shape=shape, length=len(data[dtype.names[0]]))
# Construct the empty column from `map` (note: 'data' removed above).
result = super()._construct_from_dict(map)
# Fill it with the structured data.
for name in dtype.names:
result[name] = data[name]
return result
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Column instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Column (or subclass)
New instance of this class consistent with ``cols``
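        Examples
        --------
        A minimal sketch making an empty 5-row column consistent with two
        inputs that share the same attributes::
            >>> from astropy.table import Column
            >>> c1 = Column([1, 2], name='a', unit='m')
            >>> c2 = Column([3, 4], name='a', unit='m')
            >>> c = c1.info.new_like([c1, c2], 5, name='a')
            >>> len(c), str(c.unit)
            (5, 'm')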
"""
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'unit', 'format', 'description'))
return self._parent_cls(length=length, **attrs)
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Column this is just the column itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if data is None:
self_data = np.zeros((length,)+shape, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, '_name'):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
unit = unit or data.unit
if format is None:
format = data.format
if meta is None:
meta = data.meta
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = Quantity(data, unit, dtype=dtype, copy=copy).value
# If 'info' has been defined, copy basic properties (if needed).
if 'info' in data.__dict__:
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = data.info.meta
else:
if np.dtype(dtype).char == 'S':
data = cls._encode_str(data)
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = None if name is None else str(name)
self._parent_table = None
self.unit = unit
self._format = format
self.description = description
self.meta = meta
self.indices = deepcopy(getattr(data, 'indices', [])) if copy_indices else []
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def value(self):
"""
An alias for the existing ``data`` attribute.
"""
return self.data
@property
def parent_table(self):
# Note: It seems there are some cases where _parent_table is not set,
        # such as after restoring from a pickled Column. Perhaps that should be
# fixed, but this is also okay for now.
if getattr(self, '_parent_table', None) is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order='C', data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
            as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# If there is meta on the original column then deepcopy (since "copy" of column
# implies complete independence from original). __array_finalize__ will have already
# made a light copy. I'm not sure how to avoid that initial light copy.
if self.meta is not None:
out.meta = self.meta # MetaData descriptor does a deepcopy here
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
5-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ('_name', '_unit', '_format', 'description', 'meta', 'indices')
attrs = {name: val for name, val in zip(names, state[-1])}
state = state[:-1]
# Using super().__setstate__(state) gives
# "TypeError 'int' object is not iterable", raised in
# astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__()
# Previously, it seems to have given an infinite recursion.
# Hence, manually call the right super class to actually set up
# the array object.
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state)
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
functionality but then add in a 5-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (self.name, self.unit, self.format, self.description,
self.meta, self.indices)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
def __array_finalize__(self, obj):
        # obj will be None for a direct call to the Column() creator
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, 'indices'): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
if 'info' in getattr(obj, '__dict__', {}):
self.info = obj.info
def __array_wrap__(self, out_arr, context=None):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
For this case, we use "[()]" to select everything, and to ensure we
convert a zero rank array to a scalar. (For some reason np.sum()
returns a zero rank scalar array while np.mean() returns a scalar,
so the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
out_arr = super().__array_wrap__(out_arr, context)
if (self.shape != out_arr.shape
or (isinstance(out_arr, BaseColumn)
and (context is not None
and context[0] in _comparison_functions))):
return out_arr.data[()]
else:
return out_arr
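# Illustrative sketch of the two special cases described above (added
# commentary; example values are hypothetical):
# >>> import numpy as np
# >>> from astropy.table import Column
# >>> c = Column([1, 2, 3], name='a')
# >>> np.sum(c)          # reduction: returns a plain numpy scalar, not a Column
# >>> np.greater(c, 1)   # comparison ufunc: returns a plain bool ndarray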
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val):
if val is not None:
val = str(val)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def format(self):
"""
Format string for displaying values in this column.
"""
return self._format
@format.setter
def format(self, format_string):
prev_format = getattr(self, '_format', None)
self._format = format_string # set new format string
try:
# Test that the new format string works by formatting a sample value
self.pformat(max_lines=1)
except Exception as err:
# revert to restore previous format if there was one
self._format = prev_format
raise ValueError(
"Invalid format for column '{}': could not display "
"values in this column using this format".format(
self.name)) from err
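# Illustrative sketch of the format validation above (added commentary):
# >>> from astropy.table import Column
# >>> c = Column([1.2345, 2.3456], name='x')
# >>> c.format = '{:.2f}'   # accepted; values render as e.g. '1.23'
# Assigning a format that cannot display the data (e.g. '{:d}' for float
# values) raises ValueError and the previous format is restored.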
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
yield from _pformat_col_iter(self, -1, show_name=False, show_unit=False,
show_dtype=False, outs={})
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : bool
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError('Comparison `col` must be a Column or '
'MaskedColumn object')
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
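# Quick sketch of attrs_equal() (added commentary; data and length are
# deliberately different to show that only attributes are compared):
# >>> from astropy.table import Column
# >>> c1 = Column([1, 2], name='a', unit='m')
# >>> c2 = Column([3, 4, 5], name='a', unit='m')
# >>> c1.attrs_equal(c2)
# True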
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,
html=False):
"""Return a list of formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
html : bool
Format the output as an HTML table. Default is False.
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
html=html)
return lines
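# Added usage sketch for pformat()/pprint() (illustrative only):
# >>> from astropy.table import Column
# >>> c = Column([1, 2, 3], name='a', unit='s')
# >>> lines = c.pformat(max_lines=-1)   # list of strings, no line limit
# >>> c.pprint(show_unit=True)          # prints the same, header lines in red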
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(self, max_lines=max_lines, show_name=show_name,
show_unit=show_unit)
@property
def unit(self):
"""
The unit associated with this column. May be a string or an
`~astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict='silent')
@unit.deleter
def unit(self):
self._unit = None
def searchsorted(self, v, side='left', sorter=None):
# For bytes type data, encode the `v` value as UTF-8 (if necessary) before
# calling searchsorted. This prevents a factor of 1000 slowdown in
# searchsorted in this case.
a = self.data
if a.dtype.kind == 'S' and not isinstance(v, bytes):
v = np.asarray(v)
if v.dtype.kind == 'U':
v = np.char.encode(v, 'utf-8')
return np.searchsorted(a, v, side=side, sorter=sorter)
searchsorted.__doc__ = np.ndarray.searchsorted.__doc__
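# Sketch of the bytes-encoding shortcut above (added commentary):
# >>> from astropy.table import Column
# >>> c = Column(['apple', 'kiwi', 'pear'], dtype='S', name='fruit')
# >>> c.searchsorted('kiwi')   # str key is utf-8 encoded before the search
# 1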
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(
new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
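# Added sketch contrasting the two behaviors described above:
# >>> from astropy.table import Column
# >>> c = Column([1000., 2000.], name='d', unit='m')
# >>> c.unit = 'km'             # relabels only; data values are unchanged
# >>> c.unit = 'm'
# >>> c.convert_unit_to('km')   # rescales the data in-place to [1., 2.]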
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
"""
return groups.column_group_by(self, keys)
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``
"""
if self.parent_table:
if hasattr(self.parent_table, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self.parent_table._groups._indices)
elif hasattr(self, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(self, self.unit, copy=False, dtype=self.dtype, order='A', subok=True)
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : unit-like
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of tuple
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
"""
return self.quantity.to(unit, equivalencies)
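# Added sketch of quantity/to() (illustrative; requires numeric data):
# >>> from astropy.table import Column
# >>> c = Column([1., 2.], name='d', unit='km')
# >>> q = c.quantity    # view of the data as a Quantity in km (no copy)
# >>> c.to('m')         # new Quantity with values [1000., 2000.] m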
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self
"""
for attr in ('name', 'unit', '_format', 'description'):
val = getattr(obj, attr, None)
setattr(self, attr, val)
# Light copy of meta if it is not empty
obj_meta = getattr(obj, 'meta', None)
if obj_meta:
self.meta = obj_meta.copy()
@staticmethod
def _encode_str(value):
"""
Encode anything that is unicode-ish as utf-8. This method is only
called for Py3+.
"""
if isinstance(value, str):
value = value.encode('utf-8')
elif isinstance(value, bytes) or value is np.ma.masked:
pass
else:
arr = np.asarray(value)
if arr.dtype.char == 'U':
arr = np.char.encode(arr, encoding='utf-8')
if isinstance(value, np.ma.MaskedArray):
arr = np.ma.array(arr, mask=value.mask, copy=False)
value = arr
return value
def tolist(self):
if self.dtype.kind == 'S':
return np.chararray.decode(self, encoding='utf-8').tolist()
else:
return super().tolist()
class Column(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray, or None
Column data values
name : str
Column name and key for reference within Table
dtype : `~numpy.dtype`-like
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str, None, or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
To access the ``Column`` data as a raw `numpy.ndarray` object, you can use
one of the ``data`` or ``value`` attributes (which are equivalent)::
col.data
col.value
"""
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError("Cannot convert a MaskedColumn with masked value to a Column")
self = super().__new__(
cls, data=data, name=name, dtype=dtype, shape=shape, length=length,
description=description, unit=unit, format=format, meta=meta,
copy=copy, copy_indices=copy_indices)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError("cannot set mask value to a column in non-masked Table")
super().__setattr__(item, value)
if item == 'unit' and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (('name', self.name),
('dtype', dtype_info_name(self.dtype)),
('shape', shape),
('unit', unit),
('format', self.format),
('description', self.description),
('length', len(self))):
if val is not None:
descr_vals.append(f'{attr}={val!r}')
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from astropy.utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return '\n'.join(lines)
def __bytes__(self):
return str(self).encode('utf-8')
def _check_string_truncate(self, value):
"""
Emit a warning if any elements of ``value`` will be truncated when
``value`` is assigned to self.
"""
# Convert input ``value`` to the string dtype of this column and
# find the length of the longest string in the array.
value = np.asanyarray(value, dtype=self.dtype.type)
if value.size == 0:
return
value_str_len = np.char.str_len(value).max()
# Parse the array-protocol typestring (e.g. '|U15') of self.dtype which
# has the character repeat count on the right side.
self_str_len = dtype_bytes_or_chars(self.dtype)
if value_str_len > self_str_len:
warnings.warn('truncated right side string(s) longer than {} '
'character(s) during assignment'
.format(self_str_len),
StringTruncateWarning,
stacklevel=3)
def __setitem__(self, index, value):
if self.dtype.char == 'S':
value = self._encode_str(value)
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
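# Added sketch of the truncation warning path above (illustrative values):
# >>> from astropy.table import Column
# >>> c = Column(['ab', 'cd'], name='s')   # fixed-width 2-character dtype
# >>> c[0] = 'abcdef'   # emits StringTruncateWarning; stored value is 'ab'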
__eq__ = _make_compare('__eq__')
__ne__ = _make_compare('__ne__')
__gt__ = _make_compare('__gt__')
__lt__ = _make_compare('__lt__')
__ge__ = _make_compare('__ge__')
__le__ = _make_compare('__le__')
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different from
that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.Column`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new column is returned.
"""
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=axis)
data[obj] = values
else:
self_for_insert = _expand_string_array_for_values(self, values)
data = np.insert(self_for_insert, obj, values, axis=axis)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
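# Added usage sketch for insert() (a new Column is returned; self is unchanged):
# >>> from astropy.table import Column
# >>> c = Column([1, 2, 4], name='a')
# >>> c2 = c.insert(2, 3)   # -> Column with values [1, 2, 3, 4]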
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
class MaskedColumnInfo(ColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information. In this case
it just adds the ``mask_val`` attribute.
"""
# Add `serialize_method` attribute to the attrs that MaskedColumnInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. See also code below.
attr_names = ColumnInfo.attr_names | {'serialize_method'}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = 'data'
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {'fits': 'null_value',
'ecsv': 'null_value',
'hdf5': 'data_mask',
'parquet': 'data_mask',
None: 'null_value'}
def _represent_as_dict(self):
out = super()._represent_as_dict()
# If we are a structured masked column, then our parent class,
# ColumnInfo, will already have set up a dict with masked parts,
# which will be serialized later, so no further work needed here.
if self._parent.dtype.names is not None:
return out
col = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == 'data_mask':
# Note: a driver here is a performance issue in #8443 where repr() of a
# np.ma.MaskedArray value is up to 10 times slower than repr of a normal array
# value. So regardless of whether there are masked elements it is useful to
# explicitly define this as a serialized column and use col.data.data (ndarray)
# instead of letting it fall through to the "standard" serialization machinery.
out['data'] = col.data.data
if np.any(col.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out['mask'] = col.mask
elif method == 'null_value':
pass
else:
raise ValueError('serialize method must be either "data_mask" or "null_value"')
return out
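# Added sketch of the per-format serialize_method mapping set up above; the
# 'data_mask' entry tells writers that support it to store the mask as a
# separate column (shown here only by inspecting the dict on a bound column):
# >>> from astropy.table import MaskedColumn
# >>> col = MaskedColumn([1, 2], mask=[True, False], name='a')
# >>> col.info.serialize_method['hdf5']
# 'data_mask'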
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
"""Define a masked data column for use in a Table object.
Parameters
----------
data : list, ndarray, or None
Column data values
name : str
Column name and key for reference within Table
mask : list, ndarray or None
Boolean mask for which True indicates missing or invalid data
fill_value : float, int, str, or None
Value used when filling masked column elements
dtype : `~numpy.dtype`-like
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str, None, or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A MaskedColumn is similar to a Column except that it includes ``mask`` and
``fill_value`` attributes. It can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = MaskedColumn(data=[1, 2], name='name')
col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)
The ``mask`` argument will be cast as a boolean array and specifies
which elements are considered to be missing or invalid.
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://numpy.org/doc/stable/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``. When ``data`` is provided then the ``shape``
and ``length`` arguments are ignored.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = MaskedColumn(name='name', length=5)
col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
To access the ``Column`` data as a raw `numpy.ma.MaskedArray` object, you can
use one of the ``data`` or ``value`` attributes (which are equivalent)::
col.data
col.value
"""
info = MaskedColumnInfo()
def __new__(cls, data=None, name=None, mask=None, fill_value=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if mask is None:
# If mask is None then we need to determine the mask (if any) from the data.
# The naive method is looking for a mask attribute on data, but this can fail,
# see #8816. Instead use ``MaskedArray`` to do the work.
mask = ma.MaskedArray(data).mask
if mask is np.ma.nomask:
# Handle odd-ball issue with np.ma.nomask (numpy #13758), and see below.
mask = False
elif copy:
mask = mask.copy()
elif mask is np.ma.nomask:
# Force the creation of a full mask array as nomask is tricky to
# use and will fail in an unexpected manner when setting a value
# to the mask.
mask = False
else:
mask = deepcopy(mask)
# Create self using MaskedArray as a wrapper class, following the example of
# class MSubArray in
# https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
# This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
# https://github.com/astropy/astropy/commit/ff6039e8)
# First just pass through all args and kwargs to BaseColumn, then wrap that object
# with MaskedArray.
self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name,
unit=unit, format=format, description=description,
meta=meta, copy=copy, copy_indices=copy_indices)
self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
# The above process preserves info relevant for Column, but this does
# not include serialize_method (and possibly other future attributes)
# relevant for MaskedColumn, so we set info explicitly.
if 'info' in getattr(data, '__dict__', {}):
self.info = data.info
# Note: do not set fill_value in the MaskedArray constructor because this does not
# go through the fill_value workarounds.
if fill_value is None:
data_fill_value = getattr(data, 'fill_value', None)
if (data_fill_value is not None
and data_fill_value != np.ma.default_fill_value(data.dtype)):
fill_value = np.array(data_fill_value, self.dtype)[()]
self.fill_value = fill_value
self.parent_table = None
# needs to be done here since self doesn't come from BaseColumn.__new__
for index in self.indices:
index.replace_col(self_data, self)
return self
@property
def fill_value(self):
return self.get_fill_value() # defer to native ma.MaskedArray method
@fill_value.setter
def fill_value(self, val):
"""Set fill value both in the masked column view and in the parent table
if it exists. Setting one or the other alone doesn't work."""
# another ma bug workaround: If the value of fill_value for a string array is
# requested but not yet set then it gets created as 'N/A'. From this point onward
# any new fill_values are truncated to 3 characters. Note that this does not
# occur if the masked array is a structured array (as in the previous block that
# deals with the parent table).
#
# >>> x = ma.array(['xxxx'])
# >>> x.fill_value # fill_value now gets represented as an 'S3' array
# 'N/A'
# >>> x.fill_value='yyyy'
# >>> x.fill_value
# 'yyy'
#
# To handle this we are forced to reset a private variable first:
self._fill_value = None
self.set_fill_value(val) # defer to native ma.MaskedArray method
@property
def data(self):
"""The plain MaskedArray data held by this column."""
out = self.view(np.ma.MaskedArray)
# By default, a MaskedArray view will set the _baseclass to be the
# same as that of our own class, i.e., BaseColumn. Since we want
# to return a plain MaskedArray, we reset the baseclass accordingly.
out._baseclass = np.ndarray
return out
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (`None` by default). If
`None`, the ``fill_value`` attribute of the array is used
instead.
Returns
-------
filled_column : Column
A copy of ``self`` with masked entries replaced by `fill_value`
(be it the function argument or the attribute of ``self``).
"""
if fill_value is None:
fill_value = self.fill_value
data = super().filled(fill_value)
# Use parent table definition of Column if available
column_cls = self.parent_table.Column if (self.parent_table is not None) else Column
out = column_cls(name=self.name, data=data, unit=self.unit,
format=self.format, description=self.description,
meta=deepcopy(self.meta))
return out
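# Added sketch of fill_value/filled() (filled() returns a plain Column):
# >>> from astropy.table import MaskedColumn
# >>> c = MaskedColumn([1, 2, 3], mask=[False, True, False], name='a')
# >>> c.fill_value = -99
# >>> c.filled()   # -> Column with values [1, -99, 3]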
def insert(self, obj, values, mask=None, axis=0):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.table.MaskedColumn` object.
Parameters
----------
obj : int, slice or sequence of int
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Value(s) to insert. If the type of ``values`` is different from
that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
mask : bool or array-like
Mask value(s) to insert. If not supplied, and ``values`` does not
have a mask either, then False is used.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.MaskedColumn`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new masked column is returned.
"""
self_ma = self.data # self viewed as MaskedArray
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
new_data = np.insert(self_ma.data, obj, None, axis=axis)
new_data[obj] = values
else:
self_ma = _expand_string_array_for_values(self_ma, values)
new_data = np.insert(self_ma.data, obj, values, axis=axis)
if mask is None:
mask = getattr(values, 'mask', np.ma.nomask)
if mask is np.ma.nomask:
if self.dtype.kind == 'O':
mask = False
else:
mask = np.zeros(np.shape(values), dtype=bool)
new_mask = np.insert(self_ma.mask, obj, mask, axis=axis)
new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
out = new_ma.view(self.__class__)
out.parent_table = None
out.indices = []
out._copy_attrs(self)
out.fill_value = self.fill_value
return out
def _copy_attrs_slice(self, out):
# Fixes issue #3023: when calling getitem with a MaskedArray subclass
# the original object attributes are not copied.
if out.__class__ is self.__class__:
# TODO: this part is essentially the same as what is done in
# __array_finalize__ and could probably be called directly in our
# override of __getitem__ in _columns_mixins.pyx). Refactor?
if 'info' in self.__dict__:
out.info = self.info
out.parent_table = None
# we need this because __getitem__ does a shallow copy of indices
if out.indices is self.indices:
out.indices = []
out._copy_attrs(self)
return out
def __setitem__(self, index, value):
# Issue warning for string assignment that truncates ``value``
if self.dtype.char == 'S':
value = self._encode_str(value)
if issubclass(self.dtype.type, np.character):
# Account for a bug in np.ma.MaskedArray setitem.
# https://github.com/numpy/numpy/issues/8624
value = np.ma.asanyarray(value, dtype=self.dtype.type)
# Check for string truncation after filling masked items with
# empty (zero-length) string. Note that filled() does not make
# a copy if there are no masked items.
self._check_string_truncate(value.filled(''))
# update indices
self.info.adjust_indices(index, value, len(self))
ma.MaskedArray.__setitem__(self, index, value)
# We do this to make the methods show up in the API docs
name = BaseColumn.name
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
|
c3fd7d4698c09f396b842a127f3ca38c0fd79ea8b72a67442031a802f3aa27b0 | """
High-level table operations:
- join()
- setdiff()
- hstack()
- vstack()
- dstack()
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import collections
import itertools
from collections import OrderedDict, Counter
from collections.abc import Mapping, Sequence
import numpy as np
from astropy.utils import metadata
from astropy.utils.masked import Masked
from .table import Table, QTable, Row, Column, MaskedColumn
from astropy.units import Quantity
from . import _np_utils
from .np_utils import TableMergeError
__all__ = ['join', 'setdiff', 'hstack', 'vstack', 'unique',
'join_skycoord', 'join_distance']
__doctest_requires__ = {'join_skycoord': ['scipy'], 'join_distance': ['scipy']}
def _merge_table_meta(out, tables, metadata_conflicts='warn'):
out_meta = deepcopy(tables[0].meta)
for table in tables[1:]:
out_meta = metadata.merge(out_meta, table.meta, metadata_conflicts=metadata_conflicts)
out.meta.update(out_meta)
def _get_list_of_tables(tables):
"""
Check that tables is a Table or sequence of Tables. Returns the
corresponding list of Tables.
"""
# Make sure we have a list of things
if not isinstance(tables, Sequence):
tables = [tables]
# Make sure there is something to stack
if len(tables) == 0:
raise ValueError('no values provided to stack.')
# Convert inputs (Table, Row, or anything column-like) to Tables.
# Special case that Quantity converts to a QTable.
for ii, val in enumerate(tables):
if isinstance(val, Table):
pass
elif isinstance(val, Row):
tables[ii] = Table(val)
elif isinstance(val, Quantity):
tables[ii] = QTable([val])
else:
try:
tables[ii] = Table([val])
except (ValueError, TypeError) as err:
raise TypeError(f'Cannot convert {val} to table column.') from err
return tables
def _get_out_class(objs):
"""
From a list of input objects ``objs`` get merged output object class.
This is just taken as the deepest subclass. This doesn't handle complicated
inheritance schemes, but as a special case, classes which share ``info``
are taken to be compatible.
"""
out_class = objs[0].__class__
for obj in objs[1:]:
if issubclass(obj.__class__, out_class):
out_class = obj.__class__
if any(not (issubclass(out_class, obj.__class__)
or out_class.info is obj.__class__.info) for obj in objs):
raise ValueError('unmergeable object classes {}'
.format([obj.__class__.__name__ for obj in objs]))
return out_class
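# Added sketch of the "deepest subclass wins" rule implemented by this
# module-internal helper (QTable is a subclass of Table):
# >>> from astropy.table import Table, QTable
# >>> _get_out_class([Table(), QTable()]) is QTable
# True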
def join_skycoord(distance, distance_func='search_around_sky'):
"""Helper function to join on SkyCoord columns using distance matching.
This function is intended for use in ``table.join()`` to allow performing a
table join where the key columns are both ``SkyCoord`` objects, matched by
computing the distance between points and accepting values below
``distance``.
The distance cross-matching is done using either
`~astropy.coordinates.search_around_sky` or
`~astropy.coordinates.search_around_3d`, depending on the value of
``distance_func``. The default is ``'search_around_sky'``.
One can also provide a function object for ``distance_func``, in which case
it must be a function that follows the same input and output API as
`~astropy.coordinates.search_around_sky`. In this case the function will
be called with ``(skycoord1, skycoord2, distance)`` as arguments.
Parameters
----------
distance : `~astropy.units.Quantity` ['angle', 'length']
Maximum distance between points to be considered a join match.
Must have angular or distance units.
distance_func : str or function
Specifies the function for performing the cross-match based on
``distance``. If supplied as a string this specifies the name of a
function in `astropy.coordinates`. If supplied as a function then that
function is called directly.
Returns
-------
join_func : function
Function that accepts two ``SkyCoord`` columns (col1, col2) and returns
the tuple (ids1, ids2) of pair-matched unique identifiers.
Examples
--------
This example shows an inner join of two ``SkyCoord`` columns, taking any
sources within 0.2 deg to be a match. Note the new ``sc_id`` column which
is added and provides a unique source identifier for the matches.
>>> from astropy.coordinates import SkyCoord
>>> import astropy.units as u
>>> from astropy.table import Table, join_skycoord
>>> from astropy import table
>>> sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg')
>>> sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg')
>>> join_func = join_skycoord(0.2 * u.deg)
>>> join_func(sc1, sc2) # Associate each coordinate with unique source ID
(array([3, 1, 1, 2]), array([4, 1, 2]))
>>> t1 = Table([sc1], names=['sc'])
>>> t2 = Table([sc2], names=['sc'])
>>> t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)})
>>> print(t12) # Note new `sc_id` column with the IDs from join_func()
sc_id sc_1 sc_2
deg,deg deg,deg
----- ------- --------
1 1.0,0.0 1.05,0.0
1 1.1,0.0 1.05,0.0
2 2.0,0.0 2.1,0.0
"""
if isinstance(distance_func, str):
import astropy.coordinates as coords
try:
distance_func = getattr(coords, distance_func)
except AttributeError as err:
raise ValueError('distance_func must be a function in astropy.coordinates') from err
else:
from inspect import isfunction
if not isfunction(distance_func):
raise ValueError('distance_func must be a str or function')
def join_func(sc1, sc2):
# Call the appropriate SkyCoord method to find pairs within distance
idxs1, idxs2, d2d, d3d = distance_func(sc1, sc2, distance)
# Now convert that into unique identifiers for each near-pair. This is
# taken to be transitive, so that if points 1 and 2 are "near" and points
# 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier.
# This identifier will then be used in the table join matching.
# Identifiers for each column, initialized to all zero.
ids1 = np.zeros(len(sc1), dtype=int)
ids2 = np.zeros(len(sc2), dtype=int)
# Start the identifier count at 1
id_ = 1
for idx1, idx2 in zip(idxs1, idxs2):
# If this col1 point is previously identified then set corresponding
# col2 point to same identifier. Likewise for col2 and col1.
if ids1[idx1] > 0:
ids2[idx2] = ids1[idx1]
elif ids2[idx2] > 0:
ids1[idx1] = ids2[idx2]
else:
# Not yet seen so set identifier for col1 and col2
ids1[idx1] = id_
ids2[idx2] = id_
id_ += 1
# Fill in unique identifiers for points with no near neighbor
for ids in (ids1, ids2):
for idx in np.flatnonzero(ids == 0):
ids[idx] = id_
id_ += 1
# End of closure join_func()
return ids1, ids2
return join_func
def join_distance(distance, kdtree_args=None, query_args=None):
"""Helper function to join table columns using distance matching.
This function is intended for use in ``table.join()`` to allow performing
a table join where the key columns are matched by computing the distance
between points and accepting values below ``distance``. This numerical
"fuzzy" match can apply to 1-D or 2-D columns, where in the latter case
the distance is a vector distance.
The distance cross-matching is done using `scipy.spatial.cKDTree`. If
necessary you can tweak the default behavior by providing ``dict`` values
for the ``kdtree_args`` or ``query_args``.
Parameters
----------
distance : float or `~astropy.units.Quantity` ['length']
Maximum distance between points to be considered a join match
kdtree_args : dict, None
Optional extra args for `~scipy.spatial.cKDTree`
query_args : dict, None
Optional extra args for `~scipy.spatial.cKDTree.query_ball_tree`
Returns
-------
join_func : function
Function that accepts two columns (col1, col2) and returns the tuple
(ids1, ids2) of pair-matched unique identifiers.
Examples
--------
>>> from astropy.table import Table, join_distance
>>> from astropy import table
>>> c1 = [0, 1, 1.1, 2]
>>> c2 = [0.5, 1.05, 2.1]
>>> t1 = Table([c1], names=['col'])
>>> t2 = Table([c2], names=['col'])
>>> t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_distance(0.2)})
>>> print(t12)
col_id col_1 col_2
------ ----- -----
1 1.0 1.05
1 1.1 1.05
2 2.0 2.1
3 0.0 --
4 -- 0.5
"""
try:
from scipy.spatial import cKDTree
except ImportError as exc:
raise ImportError('scipy is required to use join_distance()') from exc
if kdtree_args is None:
kdtree_args = {}
if query_args is None:
query_args = {}
def join_func(col1, col2):
if col1.ndim > 2 or col2.ndim > 2:
raise ValueError('columns for join_distance must be 1- or 2-dimensional')
if isinstance(distance, Quantity):
# Convert to np.array with common unit
col1 = col1.to_value(distance.unit)
col2 = col2.to_value(distance.unit)
dist = distance.value
else:
# Convert to np.array to allow later in-place shape changing
col1 = np.asarray(col1)
col2 = np.asarray(col2)
dist = distance
# Ensure columns are pure np.array and are 2-D for use with KDTree
if col1.ndim == 1:
col1.shape = col1.shape + (1,)
if col2.ndim == 1:
col2.shape = col2.shape + (1,)
# Cross-match col1 and col2 within dist using KDTree
kd1 = cKDTree(col1, **kdtree_args)
kd2 = cKDTree(col2, **kdtree_args)
nears = kd1.query_ball_tree(kd2, r=dist, **query_args)
# Output of above is nears which is a list of lists, where the outer
# list corresponds to each item in col1, and where the inner lists are
# indexes into col2 of elements within the distance tolerance. This
# identifies col1 / col2 near pairs.
# Now convert that into unique identifiers for each near-pair. This is
# taken to be transitive, so that if points 1 and 2 are "near" and points
# 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier.
# This identifier will then be used in the table join matching.
# Identifiers for each column, initialized to all zero.
ids1 = np.zeros(len(col1), dtype=int)
ids2 = np.zeros(len(col2), dtype=int)
# Start the identifier count at 1
id_ = 1
for idx1, idxs2 in enumerate(nears):
for idx2 in idxs2:
# If this col1 point is previously identified then set corresponding
# col2 point to same identifier. Likewise for col2 and col1.
if ids1[idx1] > 0:
ids2[idx2] = ids1[idx1]
elif ids2[idx2] > 0:
ids1[idx1] = ids2[idx2]
else:
# Not yet seen so set identifier for col1 and col2
ids1[idx1] = id_
ids2[idx2] = id_
id_ += 1
# Fill in unique identifiers for points with no near neighbor
for ids in (ids1, ids2):
for idx in np.flatnonzero(ids == 0):
ids[idx] = id_
id_ += 1
# End of closure join_func()
return ids1, ids2
return join_func
def join(left, right, keys=None, join_type='inner', *,
keys_left=None, keys_right=None,
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'], metadata_conflicts='warn',
join_funcs=None):
"""
Perform a join of the left table with the right table on specified keys.
Parameters
----------
left : `~astropy.table.Table`-like object
Left side table in the join. If not a Table, will call ``Table(left)``
right : `~astropy.table.Table`-like object
Right side table in the join. If not a Table, will call ``Table(right)``
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
keys_left : str or list of str or list of column-like, optional
Left column(s) used to match rows instead of ``keys`` arg. This can be
a single left table column name or list of column names, or a list of
column-like values with the same lengths as the left table.
keys_right : str or list of str or list of column-like, optional
Same as ``keys_left``, but for the right side of the join.
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
join_funcs : dict, None
Dict of functions to use for matching the corresponding key column(s).
See `~astropy.table.join_skycoord` for an example and details.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Try converting inputs to Table as needed
if not isinstance(left, Table):
left = Table(left)
if not isinstance(right, Table):
right = Table(right)
col_name_map = OrderedDict()
out = _join(left, right, keys, join_type,
uniq_col_name, table_names, col_name_map, metadata_conflicts,
join_funcs,
keys_left=keys_left, keys_right=keys_right)
# Merge the column and table meta data. Table subclasses might override
# these methods for custom merge behavior.
_merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts)
return out
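# Added usage sketch for join() (illustrative tables; see join_skycoord and
# join_distance above for examples using join_funcs):
# >>> from astropy.table import Table, join
# >>> t1 = Table({'name': ['a', 'b', 'c'], 'x': [1, 2, 3]})
# >>> t2 = Table({'name': ['b', 'c', 'd'], 'y': [4, 5, 6]})
# >>> t_inner = join(t1, t2, keys='name')                     # 2 rows: 'b', 'c'
# >>> t_outer = join(t1, t2, keys='name', join_type='outer')  # 4 rows, masked gaps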
def setdiff(table1, table2, keys=None):
"""
Take a set difference of table rows.
The row set difference will contain all rows in ``table1`` that are not
present in ``table2``. If the keys parameter is not defined, all columns in
``table1`` will be included in the output table.
Parameters
----------
table1 : `~astropy.table.Table`
``table1`` is on the left side of the set difference.
table2 : `~astropy.table.Table`
``table2`` is on the right side of the set difference.
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns in ``table1``.
Returns
-------
diff_table : `~astropy.table.Table`
New table containing the set difference between tables. If the set
difference is empty, an empty table will be returned.
Examples
--------
To get a set difference between two tables::
>>> from astropy.table import setdiff, Table
>>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b'))
>>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 c
4 d
9 f
>>> print(t2)
a b
--- ---
1 c
5 b
9 f
>>> print(setdiff(t1, t2))
a b
--- ---
4 d
>>> print(setdiff(t2, t1))
a b
--- ---
5 b
"""
if keys is None:
keys = table1.colnames
# Check that all keys are in table1 and table2
for tbl, tbl_str in ((table1, 'table1'), (table2, 'table2')):
diff_keys = np.setdiff1d(keys, tbl.colnames)
if len(diff_keys) != 0:
raise ValueError("The {} columns are missing from {}, cannot take "
"a set difference.".format(diff_keys, tbl_str))
# Make a light internal copy of both tables
t1 = table1.copy(copy_data=False)
t1.meta = {}
t1.keep_columns(keys)
t1['__index1__'] = np.arange(len(table1)) # Keep track of rows indices
# Make a light internal copy to avoid touching table2
t2 = table2.copy(copy_data=False)
t2.meta = {}
t2.keep_columns(keys)
# Dummy column to recover rows after join
t2['__index2__'] = np.zeros(len(t2), dtype=np.uint8) # dummy column
t12 = _join(t1, t2, join_type='left', keys=keys,
metadata_conflicts='silent')
# If t12 index2 is masked then that means some rows were in table1 but not table2.
if hasattr(t12['__index2__'], 'mask'):
# Define bool mask of table1 rows not in table2
diff = t12['__index2__'].mask
# Get the row indices of table1 for those rows
idx = t12['__index1__'][diff]
# Select corresponding table1 rows straight from table1 to ensure
# correct table and column types.
t12_diff = table1[idx]
else:
t12_diff = table1[[]]
return t12_diff
def dstack(tables, join_type='outer', metadata_conflicts='warn'):
"""
Stack columns within tables depth-wise
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof
Table(s) to stack depth-wise with the current table.
Table columns should have the same shape and name for depth-wise stacking.
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables along rows do::
>>> from astropy.table import dstack, Table
>>> t1 = Table({'a': [1., 2.], 'b': [3., 4.]}, names=('a', 'b'))
>>> t2 = Table({'a': [5., 6.], 'b': [7., 8.]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1.0 3.0
2.0 4.0
>>> print(t2)
a b
--- ---
5.0 7.0
6.0 8.0
>>> print(dstack([t1, t2]))
a b
---------- ----------
1.0 .. 5.0 3.0 .. 7.0
2.0 .. 6.0 4.0 .. 8.0
"""
_check_join_type(join_type, 'dstack')
tables = _get_list_of_tables(tables)
if len(tables) == 1:
return tables[0] # no point in stacking a single table
n_rows = {len(table) for table in tables}
if len(n_rows) != 1:
raise ValueError('Table lengths must all match for dstack')
n_row = n_rows.pop()
out = vstack(tables, join_type, metadata_conflicts)
for name, col in out.columns.items():
col = out[name]
# Reshape so each original column is now in a row.
# If entries are not 0-dim then those additional shape dims
# are just carried along.
# [x x x y y y] => [[x x x],
# [y y y]]
new_shape = (len(tables), n_row) + col.shape[1:]
try:
col.shape = (len(tables), n_row) + col.shape[1:]
except AttributeError:
col = col.reshape(new_shape)
# Transpose the table and row axes to get to
# [[x, y],
# [x, y]
# [x, y]]
axes = np.arange(len(col.shape))
axes[:2] = [1, 0]
# This temporarily makes `out` be corrupted (columns of different
# length) but it all works out in the end.
out.columns.__setitem__(name, col.transpose(axes), validated=True)
return out
def vstack(tables, join_type='outer', metadata_conflicts='warn'):
"""
Stack tables vertically (along rows)
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof
Table(s) to stack along rows (vertically) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables along rows do::
>>> from astropy.table import vstack, Table
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
a b
--- ---
5 7
6 8
>>> print(vstack([t1, t2]))
a b
--- ---
1 3
2 4
5 7
6 8
"""
_check_join_type(join_type, 'vstack')
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _vstack(tables, join_type, col_name_map, metadata_conflicts)
# Merge table metadata
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def hstack(tables, join_type='outer',
uniq_col_name='{col_name}_{table_name}', table_names=None,
metadata_conflicts='warn'):
"""
Stack tables along columns (horizontally)
A ``join_type`` of 'exact' means that the tables must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' (default)
means the output will have the union of all rows, with table values being
masked where no common values are available.
Parameters
----------
tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof
Tables to stack along columns (horizontally) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value,
but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
See Also
--------
Table.add_columns, Table.replace_column, Table.update
Examples
--------
To stack two tables horizontally (along columns) do::
>>> from astropy.table import Table, hstack
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
c d
--- ---
5 7
6 8
>>> print(hstack([t1, t2]))
a b c d
--- --- --- ---
1 3 5 7
2 4 6 8
"""
_check_join_type(join_type, 'hstack')
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _hstack(tables, join_type, uniq_col_name, table_names,
col_name_map)
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def unique(input_table, keys=None, silent=False, keep='first'):
"""
Returns the unique rows of a table.
Parameters
----------
input_table : table-like
keys : str or list of str
Name(s) of column(s) used to create unique rows.
Default is to use all columns.
keep : {'first', 'last', 'none'}
Whether to keep the first or last row for each set of
duplicates. If 'none', all rows that are duplicate are
removed, leaving only rows that are already unique in
the input.
Default is 'first'.
silent : bool
If `True`, masked value column(s) are silently removed from
``keys``. If `False`, an exception is raised when ``keys``
contains masked value column(s).
Default is `False`.
Returns
-------
unique_table : `~astropy.table.Table` object
New table containing only the unique rows of ``input_table``.
Examples
--------
>>> from astropy.table import unique, Table
>>> import numpy as np
>>> table = Table(data=[[1,2,3,2,3,3],
... [2,3,4,5,4,6],
... [3,4,5,6,7,8]],
... names=['col1', 'col2', 'col3'],
... dtype=[np.int32, np.int32, np.int32])
>>> table
<Table length=6>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
2 5 6
3 4 7
3 6 8
>>> unique(table, keys='col1')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
>>> unique(table, keys=['col1'], keep='last')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 5 6
3 6 8
>>> unique(table, keys=['col1', 'col2'])
<Table length=5>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 4 5
3 6 8
>>> unique(table, keys=['col1', 'col2'], keep='none')
<Table length=4>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 6 8
>>> unique(table, keys=['col1'], keep='none')
<Table length=1>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
"""
if keep not in ('first', 'last', 'none'):
raise ValueError("'keep' should be one of 'first', 'last', 'none'")
if isinstance(keys, str):
keys = [keys]
if keys is None:
keys = input_table.colnames
else:
if len(set(keys)) != len(keys):
raise ValueError("duplicate key names")
# Check for columns with masked values
for key in keys[:]:
col = input_table[key]
if hasattr(col, 'mask') and np.any(col.mask):
if not silent:
raise ValueError(
"cannot use columns with masked values as keys; "
"remove column '{}' from keys and rerun "
"unique()".format(key))
del keys[keys.index(key)]
if len(keys) == 0:
raise ValueError("no column remained in ``keys``; "
"unique() cannot work with masked value "
"key columns")
grouped_table = input_table.group_by(keys)
indices = grouped_table.groups.indices
if keep == 'first':
indices = indices[:-1]
elif keep == 'last':
indices = indices[1:] - 1
else:
indices = indices[:-1][np.diff(indices) == 1]
return grouped_table[indices]
def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
table_names=None):
"""
Find the column names mapping when merging the list of tables
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of the
input names will be present, while for the other non-key columns the value
will be (col_name_0, None, ...) or (None, col_name_1, ...) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.colnames:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.colnames for other in others):
out_name = uniq_col_name.format(table_name=table_name, col_name=name)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError('Merging column names resulted in duplicates: {}. '
'Change uniq_col_name or table_names args to fix this.'
.format(repeated_names))
# Convert col_name_map from a defaultdict to a plain OrderedDict so that
# missing keys are no longer silently created
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
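# Illustrative usage sketch (added comment, not part of the original module;
# the tables t1/t2 are hypothetical):
#
#     >>> from astropy.table import Table
#     >>> t1 = Table({'a': [1], 'b': [2]})
#     >>> t2 = Table({'a': [3], 'b': [4]})
#     >>> get_col_name_map([t1, t2], common_names=['a'])
#
# returns a mapping equivalent to {'a': ['a', 'a'], 'b_1': ['b', None],
# 'b_2': [None, 'b']}: the key column 'a' keeps one input name per table,
# while the conflicting non-key column 'b' is renamed via ``uniq_col_name``.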
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the common (promoted) dtype of all the input columns
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{}' columns have incompatible types: {}"
.format(names[0], tme._incompat_types)) from tme
# Make sure all input shapes are the same
uniq_shapes = {col.shape[1:] for col in in_cols}
if len(uniq_shapes) != 1:
raise TableMergeError(f'Key columns {names!r} have different shape')
shape = uniq_shapes.pop()
if out_name is not None:
out_name = str(out_name)
out_descrs.append((out_name, dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
try:
return metadata.common_dtype(cols)
except metadata.MergeConflictError as err:
tme = TableMergeError(f'Columns have incompatible types {err._incompat_types}')
tme._incompat_types = err._incompat_types
raise tme from err
def _get_join_sort_idxs(keys, left, right):
# Go through each of the key columns in order and make columns for
# a new structured array that represents the lexical ordering of those
# key columns. This structured array is then argsort'ed. The trick here
# is that some columns (e.g. Time) may need to be expanded into multiple
# columns for ordering here.
ii = 0 # Index for uniquely naming the sort columns
sort_keys_dtypes = []  # sortable_table dtypes as list of (name, dtype_str) tuples
sort_keys = [] # sortable_table (structured ndarray) column names
sort_left = {} # sortable ndarrays from left table
sort_right = {} # sortable ndarray from right table
for key in keys:
# get_sortable_arrays() returns a list of ndarrays that can be lexically
# sorted to represent the order of the column. In most cases this is just
# a single element of the column itself.
left_sort_cols = left[key].info.get_sortable_arrays()
right_sort_cols = right[key].info.get_sortable_arrays()
if len(left_sort_cols) != len(right_sort_cols):
# Should never happen because cols are screened beforehand for compatibility
raise RuntimeError('mismatch in sort cols lengths')
for left_sort_col, right_sort_col in zip(left_sort_cols, right_sort_cols):
# Check for consistency of shapes. Mismatch should never happen.
shape = left_sort_col.shape[1:]
if shape != right_sort_col.shape[1:]:
raise RuntimeError('mismatch in shape of left vs. right sort array')
if shape != ():
raise ValueError(f'sort key column {key!r} must be 1-d')
sort_key = str(ii)
sort_keys.append(sort_key)
sort_left[sort_key] = left_sort_col
sort_right[sort_key] = right_sort_col
# Build up dtypes for the structured array that gets sorted.
dtype_str = common_dtype([left_sort_col, right_sort_col])
sort_keys_dtypes.append((sort_key, dtype_str))
ii += 1
# Make the empty sortable table and fill it
len_left = len(left)
sortable_table = np.empty(len_left + len(right), dtype=sort_keys_dtypes)
for key in sort_keys:
sortable_table[key][:len_left] = sort_left[key]
sortable_table[key][len_left:] = sort_right[key]
# Finally do the (lexical) argsort and make a new sorted version
idx_sort = sortable_table.argsort(order=sort_keys)
sorted_table = sortable_table[idx_sort]
# Get indexes of unique elements (i.e. the group boundaries)
diffs = np.concatenate(([True], sorted_table[1:] != sorted_table[:-1], [True]))
idxs = np.flatnonzero(diffs)
return idxs, idx_sort
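# Worked example (added comment): joining left = [2, 1] and right = [1, 3] on a
# single key column stacks the keys into [2, 1, 1, 3]; idx_sort is a permutation
# that orders this as [1, 1, 2, 3], and idxs == [0, 2, 3, 4] marks the
# boundaries between distinct key values.  These two arrays are exactly what the
# Cython kernel _np_utils.join_inner() consumes in _join() below.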
def _apply_join_funcs(left, right, keys, join_funcs):
"""Apply join_funcs
"""
# Make light copies of left and right, then add new index columns.
left = left.copy(copy_data=False)
right = right.copy(copy_data=False)
for key, join_func in join_funcs.items():
ids1, ids2 = join_func(left[key], right[key])
# Define a unique id_key name, and keep adding underscores until we have
# a name not yet present.
id_key = key + '_id'
while id_key in left.columns or id_key in right.columns:
id_key = id_key[:-2] + '_id'
keys = tuple(id_key if orig_key == key else orig_key for orig_key in keys)
left.add_column(ids1, index=0, name=id_key) # [id_key] = ids1
right.add_column(ids2, index=0, name=id_key) # [id_key] = ids2
return left, right, keys
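# Hedged usage sketch (added comment): join_funcs is normally supplied through
# the public join() API, e.g. with the join_skycoord helper.  The tables t1/t2
# and the 'skycoord' column name are hypothetical; both tables are assumed to
# share that column.
#
#     >>> from astropy import units as u
#     >>> from astropy.table import join, join_skycoord
#     >>> out = join(t1, t2, join_funcs={'skycoord': join_skycoord(0.2 * u.deg)})
#
# join_skycoord(0.2 * u.deg) returns a matching function that assigns a common
# integer id to rows within 0.2 deg of each other; _apply_join_funcs() stores
# those ids in a temporary '<key>_id' column that then acts as the join key.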
def _join(left, right, keys=None, join_type='inner',
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'],
col_name_map=None, metadata_conflicts='warn',
join_funcs=None,
keys_left=None, keys_right=None):
"""
Perform a join of the left and right Tables on specified keys.
Parameters
----------
left : Table
Left side table in the join
right : Table
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
join_funcs : dict, None
Dict of functions to use for matching the corresponding key column(s).
See `~astropy.table.join_skycoord` for an example and details.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Special column name for cartesian join, should never collide with real column
cartesian_index_name = '__table_cartesian_join_temp_index__'
if join_type not in ('inner', 'outer', 'left', 'right', 'cartesian'):
raise ValueError("The 'join_type' argument should be in 'inner', "
"'outer', 'left', 'right', or 'cartesian' "
"(got '{}' instead)".
format(join_type))
if join_type == 'cartesian':
if keys:
raise ValueError('cannot supply keys for a cartesian join')
if join_funcs:
raise ValueError('cannot supply join_funcs for a cartesian join')
# Make light copies of left and right, then add temporary index columns
# with all the same value so later an outer join turns into a cartesian join.
left = left.copy(copy_data=False)
right = right.copy(copy_data=False)
left[cartesian_index_name] = np.uint8(0)
right[cartesian_index_name] = np.uint8(0)
keys = (cartesian_index_name, )
# Handle the case of join key columns that are different between left and
# right via keys_left/keys_right args. This is done by saving the original
# input tables and making new left and right tables that contain only the
# key cols but with common column names ['0', '1', etc]. This sets `keys` to
# those fake key names in the left and right tables
if keys_left is not None or keys_right is not None:
left_orig = left
right_orig = right
left, right, keys = _join_keys_left_right(
left, right, keys, keys_left, keys_right, join_funcs)
if keys is None:
keys = tuple(name for name in left.colnames if name in right.colnames)
if len(keys) == 0:
raise TableMergeError('No keys in common between left and right tables')
elif isinstance(keys, str):
# If we have a single key, put it in a tuple
keys = (keys,)
# Check the key columns
for arr, arr_label in ((left, 'Left'), (right, 'Right')):
for name in keys:
if name not in arr.colnames:
raise TableMergeError('{} table does not have key column {!r}'
.format(arr_label, name))
if hasattr(arr[name], 'mask') and np.any(arr[name].mask):
raise TableMergeError('{} key column {!r} has missing values'
.format(arr_label, name))
if join_funcs is not None:
if not all(key in keys for key in join_funcs):
raise ValueError(f'join_funcs keys {join_funcs.keys()} must be a '
f'subset of join keys {keys}')
left, right, keys = _apply_join_funcs(left, right, keys, join_funcs)
len_left, len_right = len(left), len(right)
if len_left == 0 or len_right == 0:
raise ValueError('input tables for join must both have at least one row')
try:
idxs, idx_sort = _get_join_sort_idxs(keys, left, right)
except NotImplementedError:
raise TypeError('one or more key columns are not sortable')
# Now that we have idxs and idx_sort, revert to the original table args to
carry on with making the output joined table. `keys` is set to an empty
# list so that all original left and right columns are included in the
# output table.
if keys_left is not None or keys_right is not None:
keys = []
left = left_orig
right = right_orig
# Joined array dtype as a list of descr (name, type_str, shape) tuples
col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names)
out_descrs = get_descrs([left, right], col_name_map)
# Main inner loop in Cython to compute the cartesian product
# indices for the given join type
int_join_type = {'inner': 0, 'outer': 1, 'left': 2, 'right': 3,
'cartesian': 1}[join_type]
masked, n_out, left_out, left_mask, right_out, right_mask = \
_np_utils.join_inner(idxs, idx_sort, len_left, int_join_type)
out = _get_out_class([left, right])()
for out_name, dtype, shape in out_descrs:
if out_name == cartesian_index_name:
continue
left_name, right_name = col_name_map[out_name]
if left_name and right_name: # this is a key which comes from left and right
cols = [left[left_name], right[right_name]]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('join unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
out[out_name] = col_cls.info.new_like(cols, n_out, metadata_conflicts, out_name)
out[out_name][:] = np.where(right_mask,
left[left_name].take(left_out),
right[right_name].take(right_out))
continue
elif left_name: # out_name came from the left table
name, array, array_out, array_mask = left_name, left, left_out, left_mask
elif right_name:
name, array, array_out, array_mask = right_name, right, right_out, right_mask
else:
raise TableMergeError('Unexpected column names (maybe one is ""?)')
# Select the correct elements from the original table
col = array[name][array_out]
# If the output column is masked then set the output column masking
# accordingly. Check for columns that don't support a mask attribute.
if masked and np.any(array_mask):
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
if isinstance(col, Quantity) and not isinstance(col, Masked):
col = Masked(col, copy=False)
# array_mask is 1-d corresponding to length of output column. We need to
# make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..).
# Mixin columns might not have ndim attribute so use len(col.shape).
array_mask.shape = (col.shape[0],) + (1,) * (len(col.shape) - 1)
# Now broadcast to the correct final shape
array_mask = np.broadcast_to(array_mask, col.shape)
try:
col[array_mask] = col.info.mask_val
except Exception as err: # Not clear how different classes will fail here
raise NotImplementedError(
"join requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__)) from err
# Set the output table column to the new joined column
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
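# Minimal usage sketch of the public wrapper around _join() (added comment;
# tables are hypothetical):
#
#     >>> from astropy.table import Table, join
#     >>> t1 = Table({'name': ['a', 'b'], 'x': [1, 2]})
#     >>> t2 = Table({'name': ['b', 'c'], 'y': [3, 4]})
#     >>> join(t1, t2, keys='name', join_type='inner')   # one row ('b')
#     >>> join(t1, t2, keys='name', join_type='outer')   # three rows, masked
#
# The inner join keeps only key values present in both tables; the outer join
# keeps all key values and masks the missing 'x' / 'y' entries.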
def _join_keys_left_right(left, right, keys, keys_left, keys_right, join_funcs):
"""Do processing to handle keys_left / keys_right args for join.
This takes the keys_left/right inputs and turns them into a list of left/right
columns corresponding to those inputs (which can be column names or column
data values). It also generates the list of fake key column names (strings
of "1", "2", etc.) that correspond to the input keys.
"""
def _keys_to_cols(keys, table, label):
# Process input `keys`, which is a str or list of str column names in
# `table` or a list of column-like objects. The `label` is just for
# error reporting.
if isinstance(keys, str):
keys = [keys]
cols = []
for key in keys:
if isinstance(key, str):
try:
cols.append(table[key])
except KeyError:
raise ValueError(f'{label} table does not have key column {key!r}')
else:
if len(key) != len(table):
raise ValueError(f'{label} table has different length from key {key}')
cols.append(key)
return cols
if join_funcs is not None:
raise ValueError('cannot supply join_funcs arg and keys_left / keys_right')
if keys_left is None or keys_right is None:
raise ValueError('keys_left and keys_right must both be provided')
if keys is not None:
raise ValueError('keys arg must be None if keys_left and keys_right are supplied')
cols_left = _keys_to_cols(keys_left, left, 'left')
cols_right = _keys_to_cols(keys_right, right, 'right')
if len(cols_left) != len(cols_right):
raise ValueError('keys_left and keys_right args must have same length')
# Make two new temp tables for the join with only the join columns and
# key columns in common.
keys = [f'{ii}' for ii in range(len(cols_left))]
left = left.__class__(cols_left, names=keys, copy=False)
right = right.__class__(cols_right, names=keys, copy=False)
return left, right, keys
def _check_join_type(join_type, func_name):
"""Check join_type arg in hstack and vstack.
This specifically checks for the common mistake of calling vstack(t1, t2)
instead of vstack([t1, t2]). The subsequent check of
``join_type in ('inner', ..)`` does not raise in this case.
"""
if not isinstance(join_type, str):
msg = '`join_type` arg must be a string'
if isinstance(join_type, Table):
msg += ('. Did you accidentally '
f'call {func_name}(t1, t2, ..) instead of '
f'{func_name}([t1, t2], ..)?')
raise TypeError(msg)
if join_type not in ('inner', 'exact', 'outer'):
raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'")
def _vstack(arrays, join_type='outer', col_name_map=None, metadata_conflicts='warn'):
"""
Stack Tables vertically (by rows)
A ``join_type`` of 'exact' means that the arrays must all
have exactly the same column names (though the order can vary). If
``join_type`` is 'inner' then the intersection of common columns will
be the output. A value of 'outer' means the output will have the union of
all columns, with array values being masked where no common values are
available.
Parameters
----------
arrays : list of Tables
Tables to stack by rows (vertically)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Trivial case of one input array
if len(arrays) == 1:
return arrays[0]
# Start by assuming an outer match where all names go to output
names = set(itertools.chain(*[arr.colnames for arr in arrays]))
col_name_map = get_col_name_map(arrays, names)
# If join_type is 'exact' then the output must have exactly the same
# number of columns as each input array
if join_type == 'exact':
for names in col_name_map.values():
if any(x is None for x in names):
raise TableMergeError('Inconsistent columns in input arrays '
"(use 'inner' or 'outer' join_type to "
"allow non-matching columns)")
join_type = 'outer'
# For an inner join, keep only columns where all input arrays have that column
if join_type == 'inner':
col_name_map = OrderedDict((name, in_names) for name, in_names in col_name_map.items()
if all(x is not None for x in in_names))
if len(col_name_map) == 0:
raise TableMergeError('Input arrays have no columns in common')
lens = [len(arr) for arr in arrays]
n_rows = sum(lens)
out = _get_out_class(arrays)()
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('vstack unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
try:
col = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name)
except metadata.MergeConflictError as err:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{}' columns have incompatible types: {}"
.format(out_name, err._incompat_types)) from err
idx0 = 0
for name, array in zip(in_names, arrays):
idx1 = idx0 + len(array)
if name in array.colnames:
col[idx0:idx1] = array[name]
else:
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
if isinstance(col, Quantity) and not isinstance(col, Masked):
col = Masked(col, copy=False)
try:
col[idx0:idx1] = col.info.mask_val
except Exception as err:
raise NotImplementedError(
"vstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__)) from err
idx0 = idx1
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
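# Minimal usage sketch of the public astropy.table.vstack wrapper (added
# comment; tables are hypothetical):
#
#     >>> from astropy.table import Table, vstack
#     >>> t1 = Table({'a': [1, 2], 'b': [3.0, 4.0]})
#     >>> t2 = Table({'a': [5, 6]})
#     >>> vstack([t1, t2], join_type='outer')   # 4 rows; 'b' masked for t2 rows
#     >>> vstack([t1, t2], join_type='inner')   # 4 rows; only column 'a'
#
# join_type='exact' would raise TableMergeError here because the column sets of
# the two inputs differ.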
def _hstack(arrays, join_type='outer', uniq_col_name='{col_name}_{table_name}',
table_names=None, col_name_map=None):
"""
Stack tables horizontally (by columns)
A ``join_type`` of 'exact' means that the arrays must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' means
the output will have the union of all rows, with array values being
masked where no common values are available.
Parameters
----------
arrays : List of tables
Tables to stack by columns (horizontally)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
if table_names is None:
table_names = [f'{ii + 1}' for ii in range(len(arrays))]
if len(arrays) != len(table_names):
raise ValueError('Number of arrays must match number of table_names')
# Trivial case of one input array
if len(arrays) == 1:
return arrays[0]
col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)
# If join_type is 'exact' then all input arrays must have the same length
arr_lens = [len(arr) for arr in arrays]
if join_type == 'exact':
if len(set(arr_lens)) > 1:
raise TableMergeError("Inconsistent number of rows in input arrays "
"(use 'inner' or 'outer' join_type to allow "
"non-matching rows)")
join_type = 'outer'
# For an inner join, keep only the common rows
if join_type == 'inner':
min_arr_len = min(arr_lens)
if len(set(arr_lens)) > 1:
arrays = [arr[:min_arr_len] for arr in arrays]
arr_lens = [min_arr_len for arr in arrays]
# If there are any output rows where one or more input arrays are missing
# then the output must be masked. If any input arrays are masked then
# output is masked.
n_rows = max(arr_lens)
out = _get_out_class(arrays)()
for out_name, in_names in col_name_map.items():
for name, array, arr_len in zip(in_names, arrays, arr_lens):
if name is None:
continue
if n_rows > arr_len:
indices = np.arange(n_rows)
indices[arr_len:] = 0
col = array[name][indices]
# If col is a Column but not MaskedColumn then upgrade at this point
# because masking is required.
if isinstance(col, Column) and not isinstance(col, MaskedColumn):
col = out.MaskedColumn(col, copy=False)
if isinstance(col, Quantity) and not isinstance(col, Masked):
col = Masked(col, copy=False)
try:
col[arr_len:] = col.info.mask_val
except Exception as err:
raise NotImplementedError(
"hstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, col.__class__.__name__)) from err
else:
col = array[name][:n_rows]
out[out_name] = col
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, Mapping):
_col_name_map.update(col_name_map)
return out
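# Minimal usage sketch of the public astropy.table.hstack wrapper (added
# comment; tables are hypothetical):
#
#     >>> from astropy.table import Table, hstack
#     >>> t1 = Table({'a': [1, 2, 3]})
#     >>> t2 = Table({'b': [4, 5]})
#     >>> hstack([t1, t2], join_type='outer')   # 3 rows; 'b' masked in row 3
#     >>> hstack([t1, t2], join_type='inner')   # 2 rows; extra t1 row dropped
#
# If both inputs shared a column name (say 'a'), the outputs would be renamed
# to 'a_1' and 'a_2' via ``uniq_col_name``.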
|
5d6ff74b9036ffb71dfcca78df065df8b7f1d2fa8e0d7f6f7cac39b25e8bae90 | """
High-level operations for numpy structured arrays.
Some code and inspiration taken from numpy.lib.recfunctions.join_by().
Redistribution license restrictions apply.
"""
import collections
from collections import OrderedDict, Counter
from collections.abc import Sequence
import numpy as np
__all__ = ['TableMergeError']
class TableMergeError(ValueError):
pass
def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
table_names=None):
"""
Find the column names mapping when merging the list of structured ndarrays
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of the input names
will be present, while for the other non-key columns the value will be (col_name_0,
None, ..) or (None, col_name_1, ..) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.dtype.names:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.dtype.names for other in others):
out_name = uniq_col_name.format(table_name=table_name, col_name=name)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError('Merging column names resulted in duplicates: {}. '
'Change uniq_col_name or table_names args to fix this.'
.format(repeated_names))
# Convert col_name_map from a defaultdict to a plain OrderedDict so that
# missing keys are no longer silently created
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the common (promoted) dtype of all the input columns
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{}' columns have incompatible types: {}"
.format(names[0], tme._incompat_types)) from tme
# Make sure all input shapes are the same
uniq_shapes = {col.shape[1:] for col in in_cols}
if len(uniq_shapes) != 1:
raise TableMergeError('Key columns have different shape')
shape = uniq_shapes.pop()
if out_name is not None:
out_name = str(out_name)
out_descrs.append((out_name, dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of structured ndarray columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = {tuple(issubclass(col.dtype.type, np_type) for np_type in np_types)
for col in cols}
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [col.dtype.name for col in cols]
tme = TableMergeError(f'Columns have incompatible types {incompat_types}')
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=col.dtype) for col in cols]
# For string-type arrays we need to explicitly fill in non-zero
# values or the final arr_common = .. step is unpredictable.
for arr in arrs:
if arr.dtype.kind in ('S', 'U'):
arr[0] = '0' * arr.itemsize
arr_common = np.array([arr[0] for arr in arrs])
return arr_common.dtype.str
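# Illustrative examples of the dtype promotion above (added comment; the
# returned strings assume a little-endian platform):
#
#     >>> import numpy as np
#     >>> common_dtype([np.array([1], dtype=np.int32),
#     ...               np.array([1.5], dtype=np.float64)])
#     '<f8'
#     >>> common_dtype([np.array([b'ab'], dtype='S2'),
#     ...               np.array([b'abcd'], dtype='S4')])
#     '|S4'
#
# Mixing fundamentally different kinds (e.g. a float column with a bytes
# column) raises TableMergeError rather than silently promoting to object.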
def _check_for_sequence_of_structured_arrays(arrays):
err = '`arrays` arg must be a sequence (e.g. list) of structured arrays'
if not isinstance(arrays, Sequence):
raise TypeError(err)
for array in arrays:
# Must be structured array
if not isinstance(array, np.ndarray) or array.dtype.names is None:
raise TypeError(err)
if len(arrays) == 0:
raise ValueError('`arrays` arg must include at least one array')
|
f34cd94e9a7bde95e968184ac9c4e7b5c1af54e50ef4e094d09fb8722b7c7545 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
def _searchsorted(array, val, side='left'):
'''
Call np.searchsorted or use a custom binary
search if necessary.
'''
if hasattr(array, 'searchsorted'):
return array.searchsorted(val, side=side)
# Python binary search
begin = 0
end = len(array)
while begin < end:
mid = (begin + end) // 2
if val > array[mid]:
begin = mid + 1
elif val < array[mid]:
end = mid
elif side == 'right':
begin = mid + 1
else:
end = mid
return begin
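# Worked example of the pure-Python fallback above (added comment; a plain list
# has no .searchsorted, so the bisection loop is used):
#
#     >>> _searchsorted([1, 2, 2, 3], 2, side='left')
#     1
#     >>> _searchsorted([1, 2, 2, 3], 2, side='right')
#     3
#
# i.e. the result is the first index at which ``val`` could be inserted while
# keeping the array sorted, on the requested side of any equal elements,
# matching np.searchsorted semantics.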
class SortedArray:
'''
Implements a sorted array container using
a list of numpy arrays.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool
Whether the values of the index must be unique.
Defaults to False.
'''
def __init__(self, data, row_index, unique=False):
self.data = data
self.row_index = row_index
self.num_cols = len(getattr(data, 'colnames', []))
self.unique = unique
@property
def cols(self):
return list(self.data.columns.values())
def add(self, key, row):
'''
Add a new entry to the sorted array.
Parameters
----------
key : tuple
Column values at the given row
row : int
Row number
'''
pos = self.find_pos(key, row) # first >= key
if self.unique and 0 <= pos < len(self.row_index) and \
all(self.data[pos][i] == key[i] for i in range(len(key))):
# already exists
raise ValueError(f'Cannot add duplicate value "{key}" in a unique index')
self.data.insert_row(pos, key)
self.row_index = self.row_index.insert(pos, row)
def _get_key_slice(self, i, begin, end):
'''
Retrieve the ith slice of the sorted array
from begin to end.
'''
if i < self.num_cols:
return self.cols[i][begin:end]
else:
return self.row_index[begin:end]
def find_pos(self, key, data, exact=False):
'''
Return the index of the first entry in the sorted array that is
greater than or equal to the given key, data pair.
Parameters
----------
key : tuple
Column key
data : int
Row number
exact : bool
If True, return the index of the given key in data
or -1 if the key is not present.
'''
begin = 0
end = len(self.row_index)
num_cols = self.num_cols
if not self.unique:
# consider the row value as well
key = key + (data,)
num_cols += 1
# search through keys in lexicographic order
for i in range(num_cols):
key_slice = self._get_key_slice(i, begin, end)
t = _searchsorted(key_slice, key[i])
# t is the smallest index >= key[i]
if exact and (t == len(key_slice) or key_slice[t] != key[i]):
# no match
return -1
elif t == len(key_slice) or (t == 0 and len(key_slice) > 0
and key[i] < key_slice[0]):
# too small or too large
return begin + t
end = begin + _searchsorted(key_slice, key[i], side='right')
begin += t
if begin >= len(self.row_index): # greater than all keys
return begin
return begin
def find(self, key):
'''
Find all rows matching the given key.
Parameters
----------
key : tuple
Column values
Returns
-------
matching_rows : list
List of rows matching the input key
'''
begin = 0
end = len(self.row_index)
# search through keys in lexicographic order
for i in range(self.num_cols):
key_slice = self._get_key_slice(i, begin, end)
t = _searchsorted(key_slice, key[i])
# t is the smallest index >= key[i]
if t == len(key_slice) or key_slice[t] != key[i]:
# no match
return []
elif t == 0 and len(key_slice) > 0 and key[i] < key_slice[0]:
# too small or too large
return []
end = begin + _searchsorted(key_slice, key[i], side='right')
begin += t
if begin >= len(self.row_index): # greater than all keys
return []
return self.row_index[begin:end]
def range(self, lower, upper, bounds):
'''
Find values in the given range.
Parameters
----------
lower : tuple
Lower search bound
upper : tuple
Upper search bound
bounds : (2,) tuple of bool
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument corresponds to an inclusive lower bound,
and the second argument to an inclusive upper bound.
'''
lower_pos = self.find_pos(lower, 0)
upper_pos = self.find_pos(upper, 0)
if lower_pos == len(self.row_index):
return []
lower_bound = tuple(col[lower_pos] for col in self.cols)
if not bounds[0] and lower_bound == lower:
lower_pos += 1 # data[lower_pos] > lower
# data[lower_pos] >= lower
# data[upper_pos] >= upper
if upper_pos < len(self.row_index):
upper_bound = tuple(col[upper_pos] for col in self.cols)
if not bounds[1] and upper_bound == upper:
upper_pos -= 1 # data[upper_pos] < upper
elif upper_bound > upper:
upper_pos -= 1 # data[upper_pos] <= upper
return self.row_index[lower_pos:upper_pos + 1]
def remove(self, key, data):
'''
Remove the given entry from the sorted array.
Parameters
----------
key : tuple
Column values
data : int
Row number
Returns
-------
successful : bool
Whether the entry was successfully removed
'''
pos = self.find_pos(key, data, exact=True)
if pos == -1: # key not found
return False
self.data.remove_row(pos)
keep_mask = np.ones(len(self.row_index), dtype=bool)
keep_mask[pos] = False
self.row_index = self.row_index[keep_mask]
return True
def shift_left(self, row):
'''
Decrement all row numbers greater than the input row.
Parameters
----------
row : int
Input row number
'''
self.row_index[self.row_index > row] -= 1
def shift_right(self, row):
'''
Increment all row numbers greater than or equal to the input row.
Parameters
----------
row : int
Input row number
'''
self.row_index[self.row_index >= row] += 1
def replace_rows(self, row_map):
'''
Replace all rows with the values they map to in the
given dictionary. Any rows not present as keys in
the dictionary will have their entries deleted.
Parameters
----------
row_map : dict
Mapping of row numbers to new row numbers
'''
num_rows = len(row_map)
keep_rows = np.zeros(len(self.row_index), dtype=bool)
tagged = 0
for i, row in enumerate(self.row_index):
if row in row_map:
keep_rows[i] = True
tagged += 1
if tagged == num_rows:
break
self.data = self.data[keep_rows]
self.row_index = np.array(
[row_map[x] for x in self.row_index[keep_rows]])
def items(self):
'''
Retrieve all array items as a list of pairs of the form
[(key, [row 1, row 2, ...]), ...]
'''
array = []
last_key = None
for i, key in enumerate(zip(*self.data.columns.values())):
row = self.row_index[i]
if key == last_key:
array[-1][1].append(row)
else:
last_key = key
array.append((key, [row]))
return array
def sort(self):
'''
Make row order align with key order.
'''
self.row_index = np.arange(len(self.row_index))
def sorted_data(self):
'''
Return rows in sorted order.
'''
return self.row_index
def __getitem__(self, item):
'''
Return a sliced reference to this sorted array.
Parameters
----------
item : slice
Slice to use for referencing
'''
return SortedArray(self.data[item], self.row_index[item])
def __repr__(self):
t = self.data.copy()
t['rows'] = self.row_index
return f'<{self.__class__.__name__} length={len(t)}>\n{t}'
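# Usage sketch (added comment): this engine is normally constructed indirectly
# through Table indexing rather than by hand; SortedArray is the default index
# engine, so the explicit ``engine=`` argument below is only for illustration.
#
#     >>> from astropy.table import Table
#     >>> from astropy.table.sorted_array import SortedArray
#     >>> t = Table({'a': [2, 1, 3]})
#     >>> t.add_index('a', engine=SortedArray)
#     >>> t.loc[1]   # row lookup through the index
#
# Internally ``data`` holds a sorted copy of the key column(s) and ``row_index``
# the matching original row numbers.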
|
f530888282f8265940b34f15a93cb2e19859160828a8f2e2d1b5f5ac75f7c495 | ascii_coded = ('Ò♙♙♙♙♙♙♙♙♌♐♐♌♙♙♙♙♙♙♌♌♙♙Ò♙♙♙♙♙♙♙♘♐♐♐♈♙♙♙♙♙♌♐♐♐♔Ò♙♙♌♈♙♙♌♐♈♈♙♙♙♙♙♙♙♙♈♐♐♙Ò♙♐♙♙♙♐♐♙♙♙'
'♙♙♙♙♙♙♙♙♙♙♙♙Ò♐♔♙♙♘♐♐♙♙♌♐♐♔♙♙♌♌♌♙♙♙♌Ò♐♐♙♙♘♐♐♌♙♈♐♈♙♙♙♈♐♐♙♙♘♔Ò♐♐♌♙♘♐♐♐♌♌♙♙♌♌♌♙♈♈♙♌♐'
'♐Ò♘♐♐♐♌♐♐♐♐♐♐♌♙♈♙♌♐♐♐♐♐♔Ò♘♐♐♐♐♐♐♐♐♐♐♐♐♈♈♐♐♐♐♐♐♙Ò♙♘♐♐♐♐♈♐♐♐♐♐♐♙♙♐♐♐♐♐♙♙Ò♙♙♙♈♈♈♙♙♐'
'♐♐♐♐♔♙♐♐♐♐♈♙♙Ò♙♙♙♙♙♙♙♙♙♈♈♐♐♐♙♈♈♈♙♙♙♙Ò')
ascii_uncoded = ''.join([chr(ord(c) - 200) for c in ascii_coded])
url = 'https://media.giphy.com/media/e24Q8FKE2mxRS/giphy.gif'
message_coded = 'ĘĩĶĬĩĻ÷ĜĩĪĴĭèıĶļĭĺĩīļıķĶ'
message_uncoded = ''.join([chr(ord(c) - 200) for c in message_coded])
try:
from IPython import display
html = display.Image(url=url)._repr_html_()
class HTMLWithBackup(display.HTML):
def __init__(self, data, backup_text):
super().__init__(data)
self.backup_text = backup_text
def __repr__(self):
if self.backup_text is None:
return super().__repr__()
else:
return self.backup_text
dhtml = HTMLWithBackup(html, ascii_uncoded)
display.display(dhtml)
except ImportError:
print(ascii_uncoded)
except (UnicodeEncodeError, SyntaxError):
pass
|
91d032043675a1d9b5956b77cda5ae68738a8ee669e9525744dcaa4de8033044 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The SCEngine class uses the ``sortedcontainers`` package to implement an
Index engine for Tables.
"""
from collections import OrderedDict
from itertools import starmap
from astropy.utils.compat.optional_deps import HAS_SORTEDCONTAINERS
if HAS_SORTEDCONTAINERS:
from sortedcontainers import SortedList
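# Added explanatory comment: Node pairs a key tuple with its row number so a
# single SortedList can keep (key, row) entries ordered primarily by key.  The
# comparison methods fall back to comparing only ``self.key`` when the other
# operand is not a Node, which is what allows SortedList.irange(key, key) in
# SCEngine.find() to locate every row sharing a given key.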
class Node:
__slots__ = ('key', 'value')
def __init__(self, key, value):
self.key = key
self.value = value
def __lt__(self, other):
if other.__class__ is Node:
return (self.key, self.value) < (other.key, other.value)
return self.key < other
def __le__(self, other):
if other.__class__ is Node:
return (self.key, self.value) <= (other.key, other.value)
return self.key <= other
def __eq__(self, other):
if other.__class__ is Node:
return (self.key, self.value) == (other.key, other.value)
return self.key == other
def __ne__(self, other):
if other.__class__ is Node:
return (self.key, self.value) != (other.key, other.value)
return self.key != other
def __gt__(self, other):
if other.__class__ is Node:
return (self.key, self.value) > (other.key, other.value)
return self.key > other
def __ge__(self, other):
if other.__class__ is Node:
return (self.key, self.value) >= (other.key, other.value)
return self.key >= other
__hash__ = None
def __repr__(self):
return f'Node({self.key!r}, {self.value!r})'
class SCEngine:
'''
Fast tree-based implementation for indexing, using the
``sortedcontainers`` package.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool
Whether the values of the index must be unique.
Defaults to False.
'''
def __init__(self, data, row_index, unique=False):
if not HAS_SORTEDCONTAINERS:
raise ImportError("sortedcontainers is needed for using SCEngine")
node_keys = map(tuple, data)
self._nodes = SortedList(starmap(Node, zip(node_keys, row_index)))
self._unique = unique
def add(self, key, value):
'''
Add a key, value pair.
'''
if self._unique and (key in self._nodes):
message = f'duplicate {key!r} in unique index'
raise ValueError(message)
self._nodes.add(Node(key, value))
def find(self, key):
'''
Find rows corresponding to the given key.
'''
return [node.value for node in self._nodes.irange(key, key)]
def remove(self, key, data=None):
'''
Remove data from the given key.
'''
if data is not None:
item = Node(key, data)
try:
self._nodes.remove(item)
except ValueError:
return False
return True
items = list(self._nodes.irange(key, key))
for item in items:
self._nodes.remove(item)
return bool(items)
def shift_left(self, row):
'''
Decrement rows larger than the given row.
'''
for node in self._nodes:
if node.value > row:
node.value -= 1
def shift_right(self, row):
'''
Increment rows greater than or equal to the given row.
'''
for node in self._nodes:
if node.value >= row:
node.value += 1
def items(self):
'''
Return a list of key, data tuples.
'''
result = OrderedDict()
for node in self._nodes:
if node.key in result:
result[node.key].append(node.value)
else:
result[node.key] = [node.value]
return result.items()
def sort(self):
'''
Make row order align with key order.
'''
for index, node in enumerate(self._nodes):
node.value = index
def sorted_data(self):
'''
Return a list of rows in order sorted by key.
'''
return [node.value for node in self._nodes]
def range(self, lower, upper, bounds=(True, True)):
'''
Return row values in the given range.
'''
iterator = self._nodes.irange(lower, upper, bounds)
return [node.value for node in iterator]
def replace_rows(self, row_map):
'''
Replace rows with the values in row_map.
'''
nodes = [node for node in self._nodes if node.value in row_map]
for node in nodes:
node.value = row_map[node.value]
self._nodes.clear()
self._nodes.update(nodes)
def __repr__(self):
if len(self._nodes) > 6:
nodes = list(self._nodes[:3]) + ['...'] + list(self._nodes[-3:])
else:
nodes = self._nodes
nodes_str = ', '.join(str(node) for node in nodes)
return f'<{self.__class__.__name__} nodes={nodes_str}>'
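# Usage sketch (added comment; requires the optional sortedcontainers
# dependency and a hypothetical interactive session):
#
#     >>> from astropy.table import Table
#     >>> from astropy.table.soco import SCEngine
#     >>> t = Table({'a': [3, 1, 2]})
#     >>> t.add_index('a', engine=SCEngine)
#     >>> t.loc[2]   # indexed row lookup backed by a SortedList of Node objects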
|
f11b5c7ab83394980209c930ad687c2454095a0976f54ee8c45d3e8290f000b4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import math
import numpy as np
from astropy.modeling import models
from astropy.modeling.core import Fittable1DModel, Fittable2DModel
from .core import Kernel, Kernel1D, Kernel2D
from .utils import has_even_axis, raise_even_kernel_exception
__all__ = ['Gaussian1DKernel', 'Gaussian2DKernel', 'CustomKernel',
'Box1DKernel', 'Box2DKernel', 'Tophat2DKernel',
'Trapezoid1DKernel', 'RickerWavelet1DKernel', 'RickerWavelet2DKernel',
'AiryDisk2DKernel', 'Moffat2DKernel', 'Model1DKernel',
'Model2DKernel', 'TrapezoidDisk2DKernel', 'Ring2DKernel']
def _round_up_to_odd_integer(value):
i = math.ceil(value)
if i % 2 == 0:
return i + 1
else:
return i
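# Added example: a Gaussian kernel with stddev=2 gets a default size of
# _round_up_to_odd_integer(8 * 2) == 17, i.e. the even ceiling 16 is bumped to
# the next odd integer so that the kernel has a well-defined central pixel.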
class Gaussian1DKernel(Kernel1D):
"""
1D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
stddev : number
Standard deviation of the Gaussian kernel.
x_size : int, optional
Size of the kernel array. Default = ⌊8*stddev+1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin. Very slow.
factor : number, optional
Factor of oversampling. Default factor = 10. If the factor
is too large, evaluation can be very slow.
See Also
--------
Box1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian1DKernel
gauss_1D_kernel = Gaussian1DKernel(10)
plt.plot(gauss_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, stddev, **kwargs):
self._model = models.Gaussian1D(1. / (np.sqrt(2 * np.pi) * stddev),
0, stddev)
self._default_size = _round_up_to_odd_integer(8 * stddev)
super().__init__(**kwargs)
self.normalize()
class Gaussian2DKernel(Kernel2D):
"""
2D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
x_stddev : float
Standard deviation of the Gaussian in x before rotating by theta.
y_stddev : float
Standard deviation of the Gaussian in y before rotating by theta.
theta : float or `~astropy.units.Quantity` ['angle']
Rotation angle. If passed as a float, it is assumed to be in radians.
The rotation angle increases counterclockwise.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*stddev + 1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*stddev + 1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian2DKernel
gaussian_2D_kernel = Gaussian2DKernel(10)
plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs):
if y_stddev is None:
y_stddev = x_stddev
self._model = models.Gaussian2D(1. / (2 * np.pi * x_stddev * y_stddev),
0, 0, x_stddev=x_stddev,
y_stddev=y_stddev, theta=theta)
self._default_size = _round_up_to_odd_integer(
8 * np.max([x_stddev, y_stddev]))
super().__init__(**kwargs)
self.normalize()
class Box1DKernel(Kernel1D):
"""
1D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
and can produce artifacts when applied repeatedly to the same data.
The generated kernel is normalized so that it integrates to 1.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
weighting the edge pixels with 1/2. E.g. a Box kernel with an effective
smoothing of 4 pixels would have the following array: [0.5, 1, 1, 1, 0.5].
Parameters
----------
width : number
Width of the filter kernel.
mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'linear_interp' (default)
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response function:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box1DKernel
box_1D_kernel = Box1DKernel(9)
plt.plot(box_1D_kernel, drawstyle='steps')
plt.xlim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box1D(1. / width, 0, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs['mode'] = 'linear_interp'
super().__init__(**kwargs)
self.normalize()
class Box2DKernel(Kernel2D):
"""
2D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
and can produce artifacts when applied repeatedly to the same data.
The generated kernel is normalized so that it integrates to 1.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
weighting the edge pixels with 1/2.
Parameters
----------
width : number
Width of the filter kernel.
mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'linear_interp' (default)
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box2DKernel
box_2D_kernel = Box2DKernel(9)
plt.imshow(box_2D_kernel, interpolation='none', origin='lower',
vmin=0.0, vmax=0.015)
plt.xlim(-1, 9)
plt.ylim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box2D(1. / width ** 2, 0, 0, width, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs['mode'] = 'linear_interp'
super().__init__(**kwargs)
self.normalize()
class Tophat2DKernel(Kernel2D):
"""
2D Tophat filter kernel.
The Tophat filter is an isotropic smoothing filter. It can produce
artifacts when applied repeatedly on the same data.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : int
Radius of the filter kernel.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Tophat2DKernel
tophat_2D_kernel = Tophat2DKernel(40)
plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius, **kwargs):
self._model = models.Disk2D(1. / (np.pi * radius ** 2), 0, 0, radius)
self._default_size = _round_up_to_odd_integer(2 * radius)
super().__init__(**kwargs)
self.normalize()
class Ring2DKernel(Kernel2D):
"""
2D Ring filter kernel.
The Ring filter kernel is the difference between two Tophat kernels of
different width. This kernel is useful for, e.g., background estimation.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius_in : number
Inner radius of the ring kernel.
width : number
Width of the ring kernel.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Ring2DKernel
ring_2D_kernel = Ring2DKernel(9, 8)
plt.imshow(ring_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius_in, width, **kwargs):
radius_out = radius_in + width
self._model = models.Ring2D(1. / (np.pi * (radius_out ** 2 - radius_in ** 2)),
0, 0, radius_in, width)
self._default_size = _round_up_to_odd_integer(2 * radius_out)
super().__init__(**kwargs)
self.normalize()
class Trapezoid1DKernel(Kernel1D):
"""
1D trapezoid kernel.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
width : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, RickerWavelet1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Trapezoid1DKernel
trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2)
plt.plot(trapezoid_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('amplitude')
plt.xlim(-1, 28)
plt.show()
"""
_is_bool = False
def __init__(self, width, slope=1., **kwargs):
self._model = models.Trapezoid1D(1, 0, width, slope)
self._default_size = _round_up_to_odd_integer(width + 2. / slope)
super().__init__(**kwargs)
self.normalize()
class TrapezoidDisk2DKernel(Kernel2D):
"""
2D trapezoid kernel.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : number
Radius of the filter kernel, defined as the radius of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import TrapezoidDisk2DKernel
trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2)
plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, slope=1., **kwargs):
self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope)
self._default_size = _round_up_to_odd_integer(2 * radius + 2. / slope)
super().__init__(**kwargs)
self.normalize()
class RickerWavelet1DKernel(Kernel1D):
"""
1D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat"
kernel).
The Ricker wavelet, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smooths the data and removes slowly varying
or constant structures (e.g. Background). It is useful for peak or
multi-scale detection.
This kernel is derived from a normalized Gaussian function, by
computing the second derivative. This results in an amplitude
at the kernel's center of 1. / (sqrt(2 * pi) * width ** 3). The
normalization is the same as for `scipy.ndimage.gaussian_laplace`,
except for a minus sign.
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet1DKernel
ricker_1d_kernel = RickerWavelet1DKernel(10)
plt.plot(ricker_1d_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_is_bool = True
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.sqrt(2 * np.pi) * width ** 3)
self._model = models.RickerWavelet1D(amplitude, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
class RickerWavelet2DKernel(Kernel2D):
"""
2D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat"
kernel).
The Ricker wavelet, or inverted Gaussian-Laplace filter, is a
bandpass filter. It smooths the data and removes slowly varying
or constant structures (e.g. Background). It is useful for peak or
multi-scale detection.
    This kernel is derived from a normalized Gaussian function by computing
    the second derivative. This results in an amplitude
    at the kernel's center of 1. / (pi * width ** 4). The normalization
is the same as for `scipy.ndimage.gaussian_laplace`, except
for a minus sign.
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width +1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet2DKernel
ricker_2d_kernel = RickerWavelet2DKernel(10)
plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.pi * width ** 4)
self._model = models.RickerWavelet2D(amplitude, 0, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
class AiryDisk2DKernel(Kernel2D):
"""
2D Airy disk kernel.
This kernel models the diffraction pattern of a circular aperture.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
radius : float
The radius of the Airy disk kernel (radius of the first zero).
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*radius + 1⌋.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*radius + 1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import AiryDisk2DKernel
airydisk_2D_kernel = AiryDisk2DKernel(10)
plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, **kwargs):
self._model = models.AiryDisk2D(1, 0, 0, radius)
self._default_size = _round_up_to_odd_integer(8 * radius)
super().__init__(**kwargs)
self.normalize()
class Moffat2DKernel(Kernel2D):
"""
2D Moffat kernel.
This kernel is a typical model for a seeing limited PSF.
The generated kernel is normalized so that it integrates to 1.
Parameters
----------
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
x_size : int, optional
        Size in x direction of the kernel array. Default = ⌊4*FWHM + 1⌋, where
        the FWHM is that of the Moffat model (set by ``gamma`` and ``alpha``).
    y_size : int, optional
        Size in y direction of the kernel array. Default = ⌊4*FWHM + 1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Moffat2DKernel
moffat_2D_kernel = Moffat2DKernel(3, 2)
plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, gamma, alpha, **kwargs):
# Compute amplitude, from
# https://en.wikipedia.org/wiki/Moffat_distribution
amplitude = (alpha - 1.0) / (np.pi * gamma * gamma)
self._model = models.Moffat2D(amplitude, 0, 0, gamma, alpha)
self._default_size = _round_up_to_odd_integer(4.0 * self._model.fwhm)
super().__init__(**kwargs)
self.normalize()
class Model1DKernel(Kernel1D):
"""
Create kernel from 1D model.
The model has to be centered on x = 0.
Parameters
----------
model : `~astropy.modeling.Fittable1DModel`
Kernel response function model
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
Must be odd.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable1DModel`
See also
--------
Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian1D model:
>>> from astropy.modeling.models import Gaussian1D
>>> from astropy.convolution.kernels import Model1DKernel
>>> gauss = Gaussian1D(1, 0, 2)
And create a custom one dimensional kernel from it:
>>> gauss_kernel = Model1DKernel(gauss, x_size=9)
This kernel can now be used like a usual Astropy kernel.
"""
_separable = False
_is_bool = False
def __init__(self, model, **kwargs):
if isinstance(model, Fittable1DModel):
self._model = model
else:
raise TypeError("Must be Fittable1DModel")
super().__init__(**kwargs)
class Model2DKernel(Kernel2D):
"""
Create kernel from 2D model.
The model has to be centered on x = 0 and y = 0.
Parameters
----------
model : `~astropy.modeling.Fittable2DModel`
Kernel response function model
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width +1⌋.
Must be odd.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width +1⌋.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable2DModel`
See also
--------
Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian2D model:
>>> from astropy.modeling.models import Gaussian2D
>>> from astropy.convolution.kernels import Model2DKernel
>>> gauss = Gaussian2D(1, 0, 0, 2, 2)
And create a custom two dimensional kernel from it:
>>> gauss_kernel = Model2DKernel(gauss, x_size=9)
This kernel can now be used like a usual astropy kernel.
"""
_is_bool = False
_separable = False
def __init__(self, model, **kwargs):
self._separable = False
if isinstance(model, Fittable2DModel):
self._model = model
else:
raise TypeError("Must be Fittable2DModel")
super().__init__(**kwargs)
class CustomKernel(Kernel):
"""
Create filter kernel from list or array.
Parameters
----------
array : list or array
Filter kernel array. Size must be odd.
Raises
------
TypeError
If array is not a list or array.
`~astropy.convolution.KernelSizeError`
If array size is even.
See also
--------
Model2DKernel, Model1DKernel
Examples
--------
Define one dimensional array:
>>> from astropy.convolution.kernels import CustomKernel
>>> import numpy as np
>>> array = np.array([1, 2, 3, 2, 1])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
1
Define two dimensional array:
>>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
2
"""
def __init__(self, array):
self.array = array
super().__init__(self._array)
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
@array.setter
def array(self, array):
"""
Filter kernel array setter
"""
if isinstance(array, np.ndarray):
self._array = array.astype(np.float64)
elif isinstance(array, list):
self._array = np.array(array, dtype=np.float64)
else:
raise TypeError("Must be list or array.")
# Check if array is odd in all axes
if has_even_axis(self):
raise_even_kernel_exception()
# Check if array is bool
ones = self._array == 1.
zeros = self._array == 0
self._is_bool = bool(np.all(np.logical_or(ones, zeros)))
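# Illustrative sketch, not part of the astropy source: the boolean check above
# means a kernel built from only 0s and 1s reports ``is_bool == True`` (so a
# convolution may skip the multiplications), while a weighted kernel does not.
# The helper name below is hypothetical.
def _example_custom_kernel_is_bool():
    binary_kernel = CustomKernel([1, 0, 1, 0, 1])    # only 0/1 entries -> True
    weighted_kernel = CustomKernel([1, 2, 3, 2, 1])  # arbitrary weights -> False
    return binary_kernel.is_bool, weighted_kernel.is_bool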
|
23265052cbd2d68a29da09e1df35eed138f44b07f76f90a301b34e6c11ee69fe | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the convolution and filter functionalities of astropy.
A few conceptual notes:
A filter kernel is mainly characterized by its response function. In the 1D
case we speak of "impulse response function", in the 2D case we call it "point
spread function". This response function is given for every kernel by an
astropy `FittableModel`, which is evaluated on a grid to obtain a filter array,
which can then be applied to binned data.
The model is centered on the array and should have an amplitude such that
the array integrates to one by default.
Currently only symmetric 2D kernels are supported.
"""
import copy
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
from .utils import add_kernel_arrays_1D, add_kernel_arrays_2D, discretize_model
MAX_NORMALIZATION = 100
__all__ = ['Kernel', 'Kernel1D', 'Kernel2D', 'kernel_arithmetics']
class Kernel:
"""
Convolution kernel base class.
Parameters
----------
array : ndarray
Kernel array.
"""
_separable = False
_is_bool = True
_model = None
def __init__(self, array):
self._array = np.asanyarray(array)
@property
def truncation(self):
"""
Absolute deviation of the sum of the kernel array values from
one.
"""
return np.abs(1. - self._array.sum())
@property
def is_bool(self):
"""
Indicates if kernel is bool.
        If the kernel is boolean, the multiplication in the convolution can
        be omitted to improve performance.
"""
return self._is_bool
@property
def model(self):
"""
Kernel response model.
"""
return self._model
@property
def dimension(self):
"""
Kernel dimension.
"""
return self.array.ndim
@property
def center(self):
"""
Index of the kernel center.
"""
return [axes_size // 2 for axes_size in self._array.shape]
def normalize(self, mode='integral'):
"""
Normalize the filter kernel.
Parameters
----------
mode : {'integral', 'peak'}
One of the following modes:
* 'integral' (default)
Kernel is normalized such that its integral = 1.
* 'peak'
Kernel is normalized such that its peak = 1.
"""
if mode == 'integral':
normalization = self._array.sum()
elif mode == 'peak':
normalization = self._array.max()
else:
raise ValueError("invalid mode, must be 'integral' or 'peak'")
# Warn the user for kernels that sum to zero
if normalization == 0:
warnings.warn('The kernel cannot be normalized because it '
'sums to zero.', AstropyUserWarning)
else:
np.divide(self._array, normalization, self._array)
self._kernel_sum = self._array.sum()
@property
def shape(self):
"""
Shape of the kernel array.
"""
return self._array.shape
@property
def separable(self):
"""
Indicates if the filter kernel is separable.
        A 2D filter is separable when its filter array can be written as the
        outer product of two 1D arrays.
        If a filter kernel is separable, higher-dimensional convolutions are
        performed by applying the 1D filter array consecutively along every
        dimension. This is significantly faster than using a filter array of
        the same dimension directly.
"""
return self._separable
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
def __add__(self, kernel):
"""
Add two filter kernels.
"""
return kernel_arithmetics(self, kernel, 'add')
def __sub__(self, kernel):
"""
Subtract two filter kernels.
"""
return kernel_arithmetics(self, kernel, 'sub')
def __mul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __rmul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __array__(self):
"""
Array representation of the kernel.
"""
return self._array
def __array_wrap__(self, array, context=None):
"""
Wrapper for multiplication with numpy arrays.
"""
if type(context[0]) == np.ufunc:
return NotImplemented
else:
return array
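# Hedged usage sketch, not part of the astropy source: shows how ``normalize``
# and ``truncation`` interact for a kernel built directly from an array. The
# helper name below is hypothetical.
def _example_kernel_normalize():
    kernel = Kernel(np.array([1., 2., 4., 2., 1.]))
    before = kernel.truncation            # |1 - 10| = 9
    kernel.normalize(mode='integral')     # rescale so the array sums to 1
    after = kernel.truncation             # ~0 up to floating point error
    kernel.normalize(mode='peak')         # rescale so the maximum value is 1
    return before, after, kernel.array.max()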
class Kernel1D(Kernel):
"""
Base class for 1D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : int or None, optional
Size of the kernel array. Default = ⌊8*width+1⌋.
Only used if ``array`` is None.
array : ndarray or None, optional
Kernel array.
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, array=None, **kwargs):
# Initialize from model
if self._model:
if array is not None:
# Reject "array" keyword for kernel models, to avoid them not being
# populated as expected.
raise TypeError("Array argument not allowed for kernel models.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
array = discretize_model(self._model, x_range, **kwargs)
# Initialize from array
elif array is None:
raise TypeError("Must specify either array or model.")
super().__init__(array)
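# Illustrative sketch, not part of the astropy source, of the range logic used
# in ``Kernel1D.__init__`` above: odd sizes give integer bin centers symmetric
# about zero, while even sizes shift the grid by half a bin so the model stays
# centered between the two middle pixels. The helper name is hypothetical.
def _example_kernel1d_ranges():
    ranges = {}
    for x_size in (5, 4):
        if x_size % 2 == 0:   # even kernel
            ranges[x_size] = (-x_size // 2 + 0.5, x_size // 2 + 0.5)
        else:                 # odd kernel
            ranges[x_size] = (-(x_size - 1) // 2, (x_size - 1) // 2 + 1)
    return ranges             # {5: (-2, 3), 4: (-1.5, 2.5)}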
class Kernel2D(Kernel):
"""
Base class for 2D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : int, optional
Size in x direction of the kernel array. Default = ⌊8*width + 1⌋.
Only used if ``array`` is None.
y_size : int, optional
Size in y direction of the kernel array. Default = ⌊8*width + 1⌋.
        Only used if ``array`` is None.
array : ndarray or None, optional
Kernel array. Default is None.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
width : number
Width of the filter kernel.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, y_size=None, array=None, **kwargs):
# Initialize from model
if self._model:
if array is not None:
# Reject "array" keyword for kernel models, to avoid them not being
# populated as expected.
raise TypeError("Array argument not allowed for kernel models.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
if y_size is None:
y_size = x_size
elif y_size != int(y_size):
raise TypeError("y_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
if y_size % 2 == 0: # even kernel
y_range = (-(int(y_size)) // 2 + 0.5, (int(y_size)) // 2 + 0.5)
else: # odd kernel
y_range = (-(int(y_size) - 1) // 2, (int(y_size) - 1) // 2 + 1)
array = discretize_model(self._model, x_range, y_range, **kwargs)
# Initialize from array
elif array is None:
raise TypeError("Must specify either array or model.")
super().__init__(array)
def kernel_arithmetics(kernel, value, operation):
"""
Add, subtract or multiply two kernels.
Parameters
----------
kernel : `astropy.convolution.Kernel`
Kernel instance.
value : `astropy.convolution.Kernel`, float, or int
Value to operate with.
operation : {'add', 'sub', 'mul'}
One of the following operations:
* 'add'
Add two kernels
* 'sub'
Subtract two kernels
* 'mul'
Multiply kernel with number or convolve two kernels.
"""
# 1D kernels
if isinstance(kernel, Kernel1D) and isinstance(value, Kernel1D):
if operation == "add":
new_array = add_kernel_arrays_1D(kernel.array, value.array)
if operation == "sub":
new_array = add_kernel_arrays_1D(kernel.array, -value.array)
if operation == "mul":
raise Exception("Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead.")
new_kernel = Kernel1D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# 2D kernels
elif isinstance(kernel, Kernel2D) and isinstance(value, Kernel2D):
if operation == "add":
new_array = add_kernel_arrays_2D(kernel.array, value.array)
if operation == "sub":
new_array = add_kernel_arrays_2D(kernel.array, -value.array)
if operation == "mul":
raise Exception("Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead.")
new_kernel = Kernel2D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# kernel and number
elif isinstance(kernel, (Kernel1D, Kernel2D)) and np.isscalar(value):
if operation == "mul":
new_kernel = copy.copy(kernel)
new_kernel._array *= value
else:
raise Exception("Kernel operation not supported.")
else:
raise Exception("Kernel operation not supported.")
return new_kernel
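# Hedged usage sketch, not part of the astropy source: kernel addition and
# subtraction are dispatched to ``kernel_arithmetics`` through ``Kernel.__add__``
# and ``Kernel.__sub__``; convolving two kernels is deliberately not handled
# here. The helper name is hypothetical.
def _example_kernel_arithmetics():
    box = Kernel1D(array=np.array([1., 1., 1.]))
    peak = Kernel1D(array=np.array([0., 1., 0.]))
    summed = box + peak        # kernel_arithmetics(box, peak, 'add')
    difference = box - peak    # kernel_arithmetics(box, peak, 'sub')
    return summed.array, difference.array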
|
1e165f60b427b0bab9f2508652237668126556579c24c8e09131a3eda4345399 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .convolve import (convolve, convolve_fft, convolve_models,
convolve_models_fft, interpolate_replace_nans)
from .core import *
from .kernels import *
from .utils import *
|
26ed6e9f1361689957f6abaa7b1419996eeeab8ff1fe68fff96ac3a698de1e29 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains classes and functions to standardize access to
configuration files for Astropy and affiliated packages.
.. note::
The configuration system makes use of the 'configobj' package, which stores
configuration in a text format like that used in the standard library
`ConfigParser`. More information and documentation for configobj can be
found at https://configobj.readthedocs.io .
"""
import io
import pkgutil
import warnings
import importlib
import contextlib
import os
from os import path
from textwrap import TextWrapper
from warnings import warn
from contextlib import contextmanager, nullcontext
from astropy.extern.configobj import configobj, validate
from astropy.utils import find_current_module, silence
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.introspection import resolve_name
from .paths import get_config_dir
__all__ = ('InvalidConfigurationItemWarning', 'ConfigurationMissingWarning',
'get_config', 'reload_config', 'ConfigNamespace', 'ConfigItem',
'generate_config', 'create_config_file')
class InvalidConfigurationItemWarning(AstropyWarning):
""" A Warning that is issued when the configuration value specified in the
astropy configuration file does not match the type expected for that
configuration value.
"""
# This was raised with Astropy < 4.3 when the configuration file was not found.
# It is kept for compatibility and should be removed at some point.
@deprecated('5.0')
class ConfigurationMissingWarning(AstropyWarning):
""" A Warning that is issued when the configuration directory cannot be
accessed (usually due to a permissions problem). If this warning appears,
configuration items will be set to their defaults rather than read from the
configuration file, and no configuration will persist across sessions.
"""
# these are not in __all__ because it's not intended that a user ever see them
class ConfigurationDefaultMissingError(ValueError):
""" An exception that is raised when the configuration defaults (which
should be generated at build-time) are missing.
"""
# this is used in astropy/__init__.py
class ConfigurationDefaultMissingWarning(AstropyWarning):
""" A warning that is issued when the configuration defaults (which
should be generated at build-time) are missing.
"""
class ConfigurationChangedWarning(AstropyWarning):
"""
A warning that the configuration options have changed.
"""
class _ConfigNamespaceMeta(type):
def __init__(cls, name, bases, dict):
if cls.__bases__[0] is object:
return
for key, val in dict.items():
if isinstance(val, ConfigItem):
val.name = key
class ConfigNamespace(metaclass=_ConfigNamespaceMeta):
"""
A namespace of configuration items. Each subpackage with
configuration items should define a subclass of this class,
containing `ConfigItem` instances as members.
For example::
class Conf(_config.ConfigNamespace):
unicode_output = _config.ConfigItem(
False,
'Use Unicode characters when outputting values, ...')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when ...',
aliases=['astropy.utils.console.USE_COLOR'])
conf = Conf()
"""
def __iter__(self):
for key, val in self.__class__.__dict__.items():
if isinstance(val, ConfigItem):
yield key
keys = __iter__
"""Iterate over configuration item names."""
def values(self):
"""Iterate over configuration item values."""
for val in self.__class__.__dict__.values():
if isinstance(val, ConfigItem):
yield val
def items(self):
"""Iterate over configuration item ``(name, value)`` pairs."""
for key, val in self.__class__.__dict__.items():
if isinstance(val, ConfigItem):
yield key, val
def set_temp(self, attr, value):
"""
Temporarily set a configuration value.
Parameters
----------
attr : str
Configuration item name
value : object
The value to set temporarily.
Examples
--------
>>> import astropy
>>> with astropy.conf.set_temp('use_color', False):
... pass
... # console output will not contain color
>>> # console output contains color again...
"""
if hasattr(self, attr):
return self.__class__.__dict__[attr].set_temp(value)
raise AttributeError(f"No configuration parameter '{attr}'")
def reload(self, attr=None):
"""
Reload a configuration item from the configuration file.
Parameters
----------
attr : str, optional
The name of the configuration parameter to reload. If not
provided, reload all configuration parameters.
"""
if attr is not None:
if hasattr(self, attr):
return self.__class__.__dict__[attr].reload()
raise AttributeError(f"No configuration parameter '{attr}'")
for item in self.values():
item.reload()
def reset(self, attr=None):
"""
Reset a configuration item to its default.
Parameters
----------
attr : str, optional
The name of the configuration parameter to reload. If not
provided, reset all configuration parameters.
"""
if attr is not None:
if hasattr(self, attr):
prop = self.__class__.__dict__[attr]
prop.set(prop.defaultvalue)
return
raise AttributeError(f"No configuration parameter '{attr}'")
for item in self.values():
item.set(item.defaultvalue)
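# Hedged usage sketch, not part of the astropy source: a package defines a
# ``ConfigNamespace`` subclass whose class attributes are ``ConfigItem``
# descriptors; values can then be read, iterated over, and overridden
# temporarily. ``module='astropy'`` is passed explicitly only to keep the
# sketch self-contained; all names below are hypothetical.
def _example_config_namespace():
    class ExampleConf(ConfigNamespace):
        max_lines = ConfigItem(25, 'Maximum number of lines to print.',
                               module='astropy')
    conf = ExampleConf()
    names = list(conf)                    # ['max_lines']
    with conf.set_temp('max_lines', 10):
        temporary = conf.max_lines        # 10 inside the block
    return names, temporary, conf.max_lines   # default value restored afterwards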
class ConfigItem:
"""
A setting and associated value stored in a configuration file.
These objects should be created as members of
`ConfigNamespace` subclasses, for example::
class _Conf(config.ConfigNamespace):
unicode_output = config.ConfigItem(
False,
'Use Unicode characters when outputting values, and writing widgets '
'to the console.')
conf = _Conf()
Parameters
----------
defaultvalue : object, optional
The default value for this item. If this is a list of strings, this
item will be interpreted as an 'options' value - this item must be one
of those values, and the first in the list will be taken as the default
value.
description : str or None, optional
A description of this item (will be shown as a comment in the
configuration file)
cfgtype : str or None, optional
A type specifier like those used as the *values* of a particular key
in a ``configspec`` file of ``configobj``. If None, the type will be
inferred from the default value.
module : str or None, optional
The full module name that this item is associated with. The first
element (e.g. 'astropy' if this is 'astropy.config.configuration')
will be used to determine the name of the configuration file, while
the remaining items determine the section. If None, the package will be
inferred from the package within which this object's initializer is
called.
aliases : str, or list of str, optional
The deprecated location(s) of this configuration item. If the
config item is not found at the new location, it will be
searched for at all of the old locations.
Raises
------
RuntimeError
If ``module`` is `None`, but the module this item is created from
cannot be determined.
"""
# this is used to make validation faster so a Validator object doesn't
# have to be created every time
_validator = validate.Validator()
cfgtype = None
"""
A type specifier like those used as the *values* of a particular key in a
``configspec`` file of ``configobj``.
"""
rootname = 'astropy'
"""
Rootname sets the base path for all config files.
"""
def __init__(self, defaultvalue='', description=None, cfgtype=None,
module=None, aliases=None):
from astropy.utils import isiterable
if module is None:
module = find_current_module(2)
if module is None:
msg1 = 'Cannot automatically determine get_config module, '
msg2 = 'because it is not called from inside a valid module'
raise RuntimeError(msg1 + msg2)
else:
module = module.__name__
self.module = module
self.description = description
self.__doc__ = description
# now determine cfgtype if it is not given
if cfgtype is None:
if (isiterable(defaultvalue) and not
isinstance(defaultvalue, str)):
# it is an options list
dvstr = [str(v) for v in defaultvalue]
cfgtype = 'option(' + ', '.join(dvstr) + ')'
defaultvalue = dvstr[0]
elif isinstance(defaultvalue, bool):
cfgtype = 'boolean'
elif isinstance(defaultvalue, int):
cfgtype = 'integer'
elif isinstance(defaultvalue, float):
cfgtype = 'float'
elif isinstance(defaultvalue, str):
cfgtype = 'string'
defaultvalue = str(defaultvalue)
self.cfgtype = cfgtype
self._validate_val(defaultvalue)
self.defaultvalue = defaultvalue
if aliases is None:
self.aliases = []
elif isinstance(aliases, str):
self.aliases = [aliases]
else:
self.aliases = aliases
def __set__(self, obj, value):
return self.set(value)
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self()
def set(self, value):
"""
Sets the current value of this ``ConfigItem``.
This also updates the comments that give the description and type
information.
Parameters
----------
value
The value this item should be set to.
Raises
------
TypeError
If the provided ``value`` is not valid for this ``ConfigItem``.
"""
try:
value = self._validate_val(value)
except validate.ValidateError as e:
msg = 'Provided value for configuration item {0} not valid: {1}'
raise TypeError(msg.format(self.name, e.args[0]))
sec = get_config(self.module, rootname=self.rootname)
sec[self.name] = value
@contextmanager
def set_temp(self, value):
"""
Sets this item to a specified value only inside a with block.
Use as::
ITEM = ConfigItem('ITEM', 'default', 'description')
with ITEM.set_temp('newval'):
#... do something that wants ITEM's value to be 'newval' ...
print(ITEM)
# ITEM is now 'default' after the with block
Parameters
----------
value
The value to set this item to inside the with block.
"""
initval = self()
self.set(value)
try:
yield
finally:
self.set(initval)
def reload(self):
""" Reloads the value of this ``ConfigItem`` from the relevant
configuration file.
Returns
-------
val : object
The new value loaded from the configuration file.
"""
self.set(self.defaultvalue)
baseobj = get_config(self.module, True, rootname=self.rootname)
secname = baseobj.name
cobj = baseobj
# a ConfigObj's parent is itself, so we look for the parent with that
while cobj.parent is not cobj:
cobj = cobj.parent
newobj = configobj.ConfigObj(cobj.filename, interpolation=False)
if secname is not None:
if secname not in newobj:
return baseobj.get(self.name)
newobj = newobj[secname]
if self.name in newobj:
baseobj[self.name] = newobj[self.name]
return baseobj.get(self.name)
def __repr__(self):
out = '<{}: name={!r} value={!r} at 0x{:x}>'.format(
self.__class__.__name__, self.name, self(), id(self))
return out
def __str__(self):
out = '\n'.join(('{0}: {1}',
' cfgtype={2!r}',
' defaultvalue={3!r}',
' description={4!r}',
' module={5}',
' value={6!r}'))
out = out.format(self.__class__.__name__, self.name, self.cfgtype,
self.defaultvalue, self.description, self.module,
self())
return out
def __call__(self):
""" Returns the value of this ``ConfigItem``
Returns
-------
val : object
This item's value, with a type determined by the ``cfgtype``
attribute.
Raises
------
TypeError
If the configuration value as stored is not this item's type.
"""
def section_name(section):
if section == '':
return 'at the top-level'
else:
return f'in section [{section}]'
options = []
sec = get_config(self.module, rootname=self.rootname)
if self.name in sec:
options.append((sec[self.name], self.module, self.name))
for alias in self.aliases:
module, name = alias.rsplit('.', 1)
sec = get_config(module, rootname=self.rootname)
if '.' in module:
filename, module = module.split('.', 1)
else:
filename = module
module = ''
if name in sec:
if '.' in self.module:
new_module = self.module.split('.', 1)[1]
else:
new_module = ''
warn(
"Config parameter '{}' {} of the file '{}' "
"is deprecated. Use '{}' {} instead.".format(
name, section_name(module), get_config_filename(filename,
rootname=self.rootname),
self.name, section_name(new_module)),
AstropyDeprecationWarning)
options.append((sec[name], module, name))
if len(options) == 0:
self.set(self.defaultvalue)
options.append((self.defaultvalue, None, None))
if len(options) > 1:
filename, sec = self.module.split('.', 1)
warn(
"Config parameter '{}' {} of the file '{}' is "
"given by more than one alias ({}). Using the first.".format(
self.name, section_name(sec), get_config_filename(filename,
rootname=self.rootname),
', '.join([
'.'.join(x[1:3]) for x in options if x[1] is not None])),
AstropyDeprecationWarning)
val = options[0][0]
try:
return self._validate_val(val)
except validate.ValidateError as e:
            raise TypeError('Configuration value not valid: ' + e.args[0])
def _validate_val(self, val):
""" Validates the provided value based on cfgtype and returns the
type-cast value
throws the underlying configobj exception if it fails
"""
# note that this will normally use the *class* attribute `_validator`,
# but if some arcane reason is needed for making a special one for an
# instance or sub-class, it will be used
return self._validator.check(self.cfgtype, val)
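# Illustrative sketch, not part of the astropy source: when no ``cfgtype`` is
# given, the constructor above infers it from the default value; a list of
# strings becomes an 'option(...)' specifier whose first entry is the default.
# ``module='astropy'`` keeps the sketch self-contained; names are hypothetical.
def _example_configitem_cfgtype_inference():
    fmt = ConfigItem(['fits', 'hdf5', 'ascii'], 'Preferred file format.',
                     module='astropy')           # cfgtype 'option(fits, hdf5, ascii)'
    colorize = ConfigItem(True, 'Use colored output.', module='astropy')  # 'boolean'
    timeout = ConfigItem(10.0, 'Network timeout in seconds.', module='astropy')  # 'float'
    return fmt.cfgtype, fmt.defaultvalue, colorize.cfgtype, timeout.cfgtype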
# this dictionary stores the primary copy of the ConfigObj's for each
# root package
_cfgobjs = {}
def get_config_filename(packageormod=None, rootname=None):
"""
Get the filename of the config file associated with the given
package or module.
"""
cfg = get_config(packageormod, rootname=rootname)
while cfg.parent is not cfg:
cfg = cfg.parent
return cfg.filename
# This is used by testing to override the config file, so we can test
# with various config files that exercise different features of the
# config system.
_override_config_file = None
def get_config(packageormod=None, reload=False, rootname=None):
""" Gets the configuration object or section associated with a particular
package or module.
Parameters
----------
packageormod : str or None
The package for which to retrieve the configuration object. If a
string, it must be a valid package name, or if ``None``, the package from
which this function is called will be used.
reload : bool, optional
Reload the file, even if we have it cached.
rootname : str or None
Name of the root configuration directory. If ``None`` and
``packageormod`` is ``None``, this defaults to be the name of
the package from which this function is called. If ``None`` and
``packageormod`` is not ``None``, this defaults to ``astropy``.
Returns
-------
cfgobj : ``configobj.ConfigObj`` or ``configobj.Section``
If the requested package is a base package, this will be the
``configobj.ConfigObj`` for that package, or if it is a subpackage or
module, it will return the relevant ``configobj.Section`` object.
Raises
------
RuntimeError
If ``packageormod`` is `None`, but the package this item is created
from cannot be determined.
"""
if packageormod is None:
packageormod = find_current_module(2)
if packageormod is None:
msg1 = 'Cannot automatically determine get_config module, '
msg2 = 'because it is not called from inside a valid module'
raise RuntimeError(msg1 + msg2)
else:
packageormod = packageormod.__name__
_autopkg = True
else:
_autopkg = False
packageormodspl = packageormod.split('.')
pkgname = packageormodspl[0]
secname = '.'.join(packageormodspl[1:])
if rootname is None:
if _autopkg:
rootname = pkgname
else:
rootname = 'astropy' # so we don't break affiliated packages
cobj = _cfgobjs.get(pkgname, None)
if cobj is None or reload:
cfgfn = None
try:
# This feature is intended only for use by the unit tests
if _override_config_file is not None:
cfgfn = _override_config_file
else:
cfgfn = path.join(get_config_dir(rootname=rootname), pkgname + '.cfg')
cobj = configobj.ConfigObj(cfgfn, interpolation=False)
except OSError:
# This can happen when HOME is not set
cobj = configobj.ConfigObj(interpolation=False)
# This caches the object, so if the file becomes accessible, this
# function won't see it unless the module is reloaded
_cfgobjs[pkgname] = cobj
if secname: # not the root package
if secname not in cobj:
cobj[secname] = {}
return cobj[secname]
else:
return cobj
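# Hedged usage sketch, not part of the astropy source: requesting the root
# package returns the full ``configobj.ConfigObj``, while a dotted module name
# returns (creating it if needed) the corresponding section of that object.
# The helper name is hypothetical.
def _example_get_config():
    root = get_config('astropy')                  # configobj.ConfigObj
    section = get_config('astropy.io.fits')       # Section named 'io.fits'
    return section.parent is root, section.name   # (True, 'io.fits')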
def generate_config(pkgname='astropy', filename=None, verbose=False):
"""Generates a configuration file, from the list of `ConfigItem`
objects for each subpackage.
.. versionadded:: 4.1
Parameters
----------
pkgname : str or None
The package for which to retrieve the configuration object.
filename : str or file-like or None
        If None, the default configuration path obtained from
        `get_config_filename` is used.
"""
if verbose:
verbosity = nullcontext
filter_warnings = AstropyDeprecationWarning
else:
verbosity = silence
filter_warnings = Warning
package = importlib.import_module(pkgname)
with verbosity(), warnings.catch_warnings():
warnings.simplefilter('ignore', category=filter_warnings)
for mod in pkgutil.walk_packages(path=package.__path__,
prefix=package.__name__ + '.'):
if (mod.module_finder.path.endswith(('test', 'tests')) or
mod.name.endswith('setup_package')):
# Skip test and setup_package modules
continue
if mod.name.split('.')[-1].startswith('_'):
# Skip private modules
continue
with contextlib.suppress(ImportError):
importlib.import_module(mod.name)
wrapper = TextWrapper(initial_indent="## ", subsequent_indent='## ',
width=78)
if filename is None:
filename = get_config_filename(pkgname)
with contextlib.ExitStack() as stack:
if isinstance(filename, (str, os.PathLike)):
fp = stack.enter_context(open(filename, 'w'))
else:
# assume it's a file object, or io.StringIO
fp = filename
# Parse the subclasses, ordered by their module name
subclasses = ConfigNamespace.__subclasses__()
processed = set()
for conf in sorted(subclasses, key=lambda x: x.__module__):
mod = conf.__module__
# Skip modules for other packages, e.g. astropy modules that
# would be imported when running the function for astroquery.
if mod.split('.')[0] != pkgname:
continue
# Check that modules are not processed twice, which can happen
# when they are imported in another module.
if mod in processed:
continue
else:
processed.add(mod)
print_module = True
for item in conf().values():
if print_module:
# If this is the first item of the module, we print the
# module name, but not if this is the root package...
if item.module != pkgname:
modname = item.module.replace(f'{pkgname}.', '')
fp.write(f"[{modname}]\n\n")
print_module = False
fp.write(wrapper.fill(item.description) + '\n')
if isinstance(item.defaultvalue, (tuple, list)):
if len(item.defaultvalue) == 0:
fp.write(f'# {item.name} = ,\n\n')
elif len(item.defaultvalue) == 1:
fp.write(f'# {item.name} = {item.defaultvalue[0]},\n\n')
else:
fp.write(f'# {item.name} = {",".join(map(str, item.defaultvalue))}\n\n')
else:
fp.write(f'# {item.name} = {item.defaultvalue}\n\n')
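# Hedged usage sketch, not part of the astropy source: the generated template
# can be written to an in-memory buffer instead of the default configuration
# path, which is convenient for inspection or testing. The helper name is
# hypothetical.
def _example_generate_config():
    buffer = io.StringIO()
    generate_config('astropy', buffer)   # walk astropy subpackages, write template
    return buffer.getvalue()             # '## '-commented template text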
def reload_config(packageormod=None, rootname=None):
""" Reloads configuration settings from a configuration file for the root
package of the requested package/module.
This overwrites any changes that may have been made in `ConfigItem`
objects. This applies for any items that are based on this file, which
is determined by the *root* package of ``packageormod``
(e.g. ``'astropy.cfg'`` for the ``'astropy.config.configuration'``
module).
Parameters
----------
packageormod : str or None
The package or module name - see `get_config` for details.
rootname : str or None
Name of the root configuration directory - see `get_config`
for details.
"""
sec = get_config(packageormod, True, rootname=rootname)
# look for the section that is its own parent - that's the base object
while sec.parent is not sec:
sec = sec.parent
sec.reload()
def is_unedited_config_file(content, template_content=None):
"""
Determines if a config file can be safely replaced because it doesn't
actually contain any meaningful content, i.e. if it contains only comments
or is completely empty.
"""
buffer = io.StringIO(content)
raw_cfg = configobj.ConfigObj(buffer, interpolation=True)
# If any of the items is set, return False
return not any(len(v) > 0 for v in raw_cfg.values())
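# Illustrative sketch, not part of the astropy source: a file containing only
# comments (or nothing at all) counts as unedited and may safely be replaced,
# while any assigned value does not. The helper name is hypothetical.
def _example_is_unedited_config_file():
    return (is_unedited_config_file("## just a comment\n"),   # True
            is_unedited_config_file("max_lines = 10\n"))      # False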
# This function is no longer used by astropy but is kept for other
# packages that may use it (e.g. astroquery). It should be removed at some
# point.
# this is not in __all__ because it's not intended that a user uses it
@deprecated('5.0')
def update_default_config(pkg, default_cfg_dir_or_fn, version=None, rootname='astropy'):
"""
Checks if the configuration file for the specified package exists,
and if not, copy over the default configuration. If the
configuration file looks like it has already been edited, we do
not write over it, but instead write a file alongside it named
``pkg.version.cfg`` as a "template" for the user.
Parameters
----------
pkg : str
The package to be updated.
default_cfg_dir_or_fn : str
The filename or directory name where the default configuration file is.
If a directory name, ``'pkg.cfg'`` will be used in that directory.
version : str, optional
The current version of the given package. If not provided, it will
be obtained from ``pkg.__version__``.
rootname : str
Name of the root configuration directory.
Returns
-------
updated : bool
If the profile was updated, `True`, otherwise `False`.
Raises
------
AttributeError
        If the version number of the package could not be determined.
"""
if path.isdir(default_cfg_dir_or_fn):
default_cfgfn = path.join(default_cfg_dir_or_fn, pkg + '.cfg')
else:
default_cfgfn = default_cfg_dir_or_fn
if not path.isfile(default_cfgfn):
# There is no template configuration file, which basically
# means the affiliated package is not using the configuration
# system, so just return.
return False
cfgfn = get_config(pkg, rootname=rootname).filename
with open(default_cfgfn, encoding='latin-1') as fr:
template_content = fr.read()
doupdate = False
if cfgfn is not None:
if path.exists(cfgfn):
with open(cfgfn, encoding='latin-1') as fd:
content = fd.read()
identical = (content == template_content)
if not identical:
doupdate = is_unedited_config_file(
content, template_content)
elif path.exists(path.dirname(cfgfn)):
doupdate = True
identical = False
if version is None:
version = resolve_name(pkg, '__version__')
# Don't install template files for dev versions, or we'll end up
# spamming `~/.astropy/config`.
if version and 'dev' not in version and cfgfn is not None:
template_path = path.join(
get_config_dir(rootname=rootname), f'{pkg}.{version}.cfg')
needs_template = not path.exists(template_path)
else:
needs_template = False
if doupdate or needs_template:
if needs_template:
with open(template_path, 'wt', encoding='latin-1') as fw:
fw.write(template_content)
# If we just installed a new template file and we can't
# update the main configuration file because it has user
# changes, display a warning.
if not identical and not doupdate:
warn(
"The configuration options in {} {} may have changed, "
"your configuration file was not updated in order to "
"preserve local changes. A new configuration template "
"has been saved to '{}'.".format(
pkg, version, template_path),
ConfigurationChangedWarning)
if doupdate and not identical:
with open(cfgfn, 'wt', encoding='latin-1') as fw:
fw.write(template_content)
return True
return False
def create_config_file(pkg, rootname='astropy', overwrite=False):
"""
Create the default configuration file for the specified package.
If the file already exists, it is updated only if it has not been
modified. Otherwise the ``overwrite`` flag is needed to overwrite it.
Parameters
----------
pkg : str
The package to be updated.
rootname : str
Name of the root configuration directory.
overwrite : bool
Force updating the file if it already exists.
Returns
-------
updated : bool
If the profile was updated, `True`, otherwise `False`.
"""
# local import to prevent using the logger before it is configured
from astropy.logger import log
cfgfn = get_config_filename(pkg, rootname=rootname)
# generate the default config template
template_content = io.StringIO()
generate_config(pkg, template_content)
template_content.seek(0)
template_content = template_content.read()
doupdate = True
# if the file already exists, check that it has not been modified
if cfgfn is not None and path.exists(cfgfn):
with open(cfgfn, encoding='latin-1') as fd:
content = fd.read()
doupdate = is_unedited_config_file(content, template_content)
if doupdate or overwrite:
with open(cfgfn, 'wt', encoding='latin-1') as fw:
fw.write(template_content)
log.info('The configuration file has been successfully written '
f'to {cfgfn}')
return True
elif not doupdate:
log.warning('The configuration file already exists and seems to '
'have been customized, so it has not been updated. '
'Use overwrite=True if you really want to update it.')
return False
|
29350890288ed98dcf8259f35eec8d61d12d29fdaf37e367344a90f90d70aeb0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Distribution class and associated machinery.
"""
import builtins
import numpy as np
from astropy import units as u
from astropy import stats
__all__ = ['Distribution']
# we set this by hand because the symbolic expression (below) requires scipy
# SMAD_SCALE_FACTOR = 1 / scipy.stats.norm.ppf(0.75)
SMAD_SCALE_FACTOR = 1.48260221850560203193936104071326553821563720703125
class Distribution:
"""
A scalar value or array values with associated uncertainty distribution.
This object will take its exact type from whatever the ``samples`` argument
is. In general this is expected to be an `~astropy.units.Quantity` or
`numpy.ndarray`, although anything compatible with `numpy.asanyarray` is
possible.
See also: https://docs.astropy.org/en/stable/uncertainty/
Parameters
----------
samples : array-like
The distribution, with sampling along the *leading* axis. If 1D, the
sole dimension is used as the sampling axis (i.e., it is a scalar
distribution).
"""
_generated_subclasses = {}
def __new__(cls, samples):
if isinstance(samples, Distribution):
samples = samples.distribution
else:
samples = np.asanyarray(samples, order='C')
if samples.shape == ():
raise TypeError('Attempted to initialize a Distribution with a scalar')
new_dtype = np.dtype({'names': ['samples'],
'formats': [(samples.dtype, (samples.shape[-1],))]})
samples_cls = type(samples)
new_cls = cls._generated_subclasses.get(samples_cls)
if new_cls is None:
# Make a new class with the combined name, inserting Distribution
# itself below the samples class since that way Quantity methods
# like ".to" just work (as .view() gets intercepted). However,
# repr and str are problems, so we put those on top.
# TODO: try to deal with this at the lower level. The problem is
# that array2string does not allow one to override how structured
            # arrays are typeset, leading to all samples being shown. It may
# be possible to hack oneself out by temporarily becoming a void.
new_name = samples_cls.__name__ + cls.__name__
new_cls = type(
new_name,
(_DistributionRepr, samples_cls, ArrayDistribution),
{'_samples_cls': samples_cls})
cls._generated_subclasses[samples_cls] = new_cls
self = samples.view(dtype=new_dtype, type=new_cls)
# Get rid of trailing dimension of 1.
self.shape = samples.shape[:-1]
return self
@property
def distribution(self):
return self['samples']
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
converted = []
outputs = kwargs.pop('out', None)
if outputs:
kwargs['out'] = tuple((output.distribution if
isinstance(output, Distribution)
else output) for output in outputs)
if method in {'reduce', 'accumulate', 'reduceat'}:
axis = kwargs.get('axis', None)
if axis is None:
assert isinstance(inputs[0], Distribution)
kwargs['axis'] = tuple(range(inputs[0].ndim))
for input_ in inputs:
if isinstance(input_, Distribution):
converted.append(input_.distribution)
else:
shape = getattr(input_, 'shape', ())
if shape:
converted.append(input_[..., np.newaxis])
else:
converted.append(input_)
results = getattr(ufunc, method)(*converted, **kwargs)
if not isinstance(results, tuple):
results = (results,)
if outputs is None:
outputs = (None,) * len(results)
finals = []
for result, output in zip(results, outputs):
if output is not None:
finals.append(output)
else:
if getattr(result, 'shape', False):
finals.append(Distribution(result))
else:
finals.append(result)
return finals if len(finals) > 1 else finals[0]
@property
def n_samples(self):
"""
The number of samples of this distribution. A single `int`.
"""
return self.dtype['samples'].shape[0]
def pdf_mean(self, dtype=None, out=None):
"""
The mean of this distribution.
Arguments are as for `numpy.mean`.
"""
return self.distribution.mean(axis=-1, dtype=dtype, out=out)
def pdf_std(self, dtype=None, out=None, ddof=0):
"""
The standard deviation of this distribution.
Arguments are as for `numpy.std`.
"""
return self.distribution.std(axis=-1, dtype=dtype, out=out, ddof=ddof)
def pdf_var(self, dtype=None, out=None, ddof=0):
"""
The variance of this distribution.
Arguments are as for `numpy.var`.
"""
return self.distribution.var(axis=-1, dtype=dtype, out=out, ddof=ddof)
def pdf_median(self, out=None):
"""
The median of this distribution.
Parameters
----------
out : array, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
"""
return np.median(self.distribution, axis=-1, out=out)
def pdf_mad(self, out=None):
"""
The median absolute deviation of this distribution.
Parameters
----------
out : array, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
"""
median = self.pdf_median(out=out)
absdiff = np.abs(self - median)
return np.median(absdiff.distribution, axis=-1, out=median,
overwrite_input=True)
def pdf_smad(self, out=None):
"""
The median absolute deviation of this distribution rescaled to match the
standard deviation for a normal distribution.
Parameters
----------
out : array, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
"""
result = self.pdf_mad(out=out)
result *= SMAD_SCALE_FACTOR
return result
def pdf_percentiles(self, percentile, **kwargs):
"""
Compute percentiles of this Distribution.
Parameters
----------
percentile : float or array of float or `~astropy.units.Quantity`
The desired percentiles of the distribution (i.e., on [0,100]).
`~astropy.units.Quantity` will be converted to percent, meaning
that a ``dimensionless_unscaled`` `~astropy.units.Quantity` will
be interpreted as a quantile.
Additional keywords are passed into `numpy.percentile`.
Returns
-------
percentiles : `~astropy.units.Quantity` ['dimensionless']
The ``fracs`` percentiles of this distribution.
"""
percentile = u.Quantity(percentile, u.percent).value
percs = np.percentile(self.distribution, percentile, axis=-1, **kwargs)
# numpy.percentile strips units for unclear reasons, so we have to make
# a new object with units
if hasattr(self.distribution, '_new_view'):
return self.distribution._new_view(percs)
else:
return percs
def pdf_histogram(self, **kwargs):
"""
Compute histogram over the samples in the distribution.
Parameters
----------
        All keyword arguments are passed into `astropy.stats.histogram`. Note
        that some of these options may not be valid for some multidimensional
distributions.
Returns
-------
hist : array
The values of the histogram. Trailing dimension is the histogram
dimension.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``. Trailing dimension is the
bin histogram dimension.
"""
distr = self.distribution
raveled_distr = distr.reshape(distr.size//distr.shape[-1], distr.shape[-1])
nhists = []
bin_edges = []
for d in raveled_distr:
nhist, bin_edge = stats.histogram(d, **kwargs)
nhists.append(nhist)
bin_edges.append(bin_edge)
nhists = np.array(nhists)
nh_shape = self.shape + (nhists.size//self.size,)
bin_edges = np.array(bin_edges)
be_shape = self.shape + (bin_edges.size//self.size,)
return nhists.reshape(nh_shape), bin_edges.reshape(be_shape)
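# Hedged usage sketch, not part of the astropy source: build a distribution
# from Monte Carlo samples along the trailing axis and summarize it with the
# pdf_* helpers defined above. The helper name is hypothetical.
def _example_distribution_summary():
    rng = np.random.default_rng(42)
    samples = 3.0 + 0.5 * rng.standard_normal((4, 1000))  # 4 values, 1000 samples each
    distr = Distribution(samples)
    return distr.shape, distr.n_samples, distr.pdf_mean(), distr.pdf_std()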
class ScalarDistribution(Distribution, np.void):
"""Scalar distribution.
    This class mostly exists to make `~numpy.array2string` possible for
all subclasses. It is a scalar element, still with n_samples samples.
"""
pass
class ArrayDistribution(Distribution, np.ndarray):
# This includes the important override of view and __getitem__
# which are needed for all ndarray subclass Distributions, but not
# for the scalar one.
_samples_cls = np.ndarray
# Override view so that we stay a Distribution version of the new type.
def view(self, dtype=None, type=None):
"""New view of array with the same data.
Like `~numpy.ndarray.view` except that the result will always be a new
`~astropy.uncertainty.Distribution` instance. If the requested
``type`` is a `~astropy.uncertainty.Distribution`, then no change in
``dtype`` is allowed.
"""
if type is None and (isinstance(dtype, builtins.type)
and issubclass(dtype, np.ndarray)):
type = dtype
dtype = None
view_args = [item for item in (dtype, type) if item is not None]
if type is None or (isinstance(type, builtins.type)
and issubclass(type, Distribution)):
if dtype is not None and dtype != self.dtype:
raise ValueError('cannot view as Distribution subclass with a new dtype.')
return super().view(*view_args)
# View as the new non-Distribution class, but turn into a Distribution again.
result = self.distribution.view(*view_args)
return Distribution(result)
# Override __getitem__ so that 'samples' is returned as the sample class.
def __getitem__(self, item):
result = super().__getitem__(item)
if item == 'samples':
# Here, we need to avoid our own redefinition of view.
return super(ArrayDistribution, result).view(self._samples_cls)
elif isinstance(result, np.void):
return result.view((ScalarDistribution, result.dtype))
else:
return result
class _DistributionRepr:
def __repr__(self):
reprarr = repr(self.distribution)
if reprarr.endswith('>'):
firstspace = reprarr.find(' ')
reprarr = reprarr[firstspace+1:-1] # :-1] removes the ending '>'
return '<{} {} with n_samples={}>'.format(self.__class__.__name__,
reprarr, self.n_samples)
else: # numpy array-like
firstparen = reprarr.find('(')
reprarr = reprarr[firstparen:]
return f'{self.__class__.__name__}{reprarr} with n_samples={self.n_samples}'
def __str__(self):
distrstr = str(self.distribution)
toadd = f' with n_samples={self.n_samples}'
return distrstr + toadd
def _repr_latex_(self):
if hasattr(self.distribution, '_repr_latex_'):
superlatex = self.distribution._repr_latex_()
toadd = fr', \; n_{{\rm samp}}={self.n_samples}'
return superlatex[:-1] + toadd + superlatex[-1]
else:
return None
class NdarrayDistribution(_DistributionRepr, ArrayDistribution):
pass
# Ensure our base NdarrayDistribution is known.
Distribution._generated_subclasses[np.ndarray] = NdarrayDistribution
|
e17b00c3b571b615d3ad768661fbcd9f7da3825565a17f8d2ccc6961355ae088 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in distribution-creation functions.
"""
from warnings import warn
import numpy as np
from astropy import units as u
from .core import Distribution
__all__ = ['normal', 'poisson', 'uniform']
def normal(center, *, std=None, var=None, ivar=None, n_samples,
cls=Distribution, **kwargs):
"""
Create a Gaussian/normal distribution.
Parameters
----------
center : `~astropy.units.Quantity`
The center of this distribution
std : `~astropy.units.Quantity` or None
The standard deviation/σ of this distribution. Shape must match and unit
must be compatible with ``center``, or be `None` (if ``var`` or ``ivar``
are set).
var : `~astropy.units.Quantity` or None
The variance of this distribution. Shape must match and unit must be
compatible with ``center``, or be `None` (if ``std`` or ``ivar`` are set).
ivar : `~astropy.units.Quantity` or None
The inverse variance of this distribution. Shape must match and unit
must be compatible with ``center``, or be `None` (if ``std`` or ``var``
are set).
n_samples : int
The number of Monte Carlo samples to use with this distribution
cls : class
The class to use to create this distribution. Typically a
`Distribution` subclass.
Remaining keywords are passed into the constructor of the ``cls``
Returns
-------
distr : `~astropy.uncertainty.Distribution` or object
The sampled Gaussian distribution.
The type will be the same as the parameter ``cls``.
"""
center = np.asanyarray(center)
if var is not None:
if std is None:
std = np.asanyarray(var)**0.5
else:
raise ValueError('normal cannot take both std and var')
if ivar is not None:
if std is None:
std = np.asanyarray(ivar)**-0.5
else:
            raise ValueError('normal cannot take both ivar and '
                             'std or var')
if std is None:
raise ValueError('normal requires one of std, var, or ivar')
else:
std = np.asanyarray(std)
randshape = np.broadcast(std, center).shape + (n_samples,)
samples = center[..., np.newaxis] + np.random.randn(*randshape) * std[..., np.newaxis]
return cls(samples, **kwargs)
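# Hedged usage sketch, not part of the astropy source: draw a normal
# distribution around a Quantity array and check that its summary statistics
# recover the inputs. The helper name is hypothetical.
def _example_normal_distribution():
    center = [5., 10., 15.] * u.km
    distr = normal(center, std=0.5 * u.km, n_samples=10000)
    return distr.pdf_mean(), distr.pdf_std()   # approximately the inputs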
COUNT_UNITS = (u.count, u.electron, u.dimensionless_unscaled, u.chan, u.bin, u.vox, u.bit, u.byte)
def poisson(center, n_samples, cls=Distribution, **kwargs):
"""
Create a Poisson distribution.
Parameters
----------
center : `~astropy.units.Quantity`
The center value of this distribution (i.e., λ).
n_samples : int
The number of Monte Carlo samples to use with this distribution
cls : class
The class to use to create this distribution. Typically a
`Distribution` subclass.
Remaining keywords are passed into the constructor of the ``cls``
Returns
-------
distr : `~astropy.uncertainty.Distribution` or object
The sampled Poisson distribution.
The type will be the same as the parameter ``cls``.
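Examples
--------
A minimal usage sketch (the center value and unit are purely illustrative,
and the drawn samples are random):
>>> from astropy import units as u
>>> from astropy.uncertainty import poisson
>>> distr = poisson(25 * u.count, n_samples=1000)
>>> distr.n_samples
1000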
"""
# we convert to arrays because np.random.poisson has trouble with quantities
has_unit = False
if hasattr(center, 'unit'):
has_unit = True
poissonarr = np.asanyarray(center.value)
else:
poissonarr = np.asanyarray(center)
randshape = poissonarr.shape + (n_samples,)
samples = np.random.poisson(poissonarr[..., np.newaxis], randshape)
if has_unit:
if center.unit == u.adu:
warn('ADUs were provided to poisson. ADUs are not strictly count '
'units because they need the gain to be applied. It is '
'recommended you apply the gain to convert to e.g. electrons.')
elif center.unit not in COUNT_UNITS:
warn('Unit {} was provided to poisson, which is not one of {}, '
'and therefore suspect as a "counting" unit. Ensure you mean '
'to use Poisson statistics.'.format(center.unit, COUNT_UNITS))
# re-attach the unit
samples = samples * center.unit
return cls(samples, **kwargs)
def uniform(*, lower=None, upper=None, center=None, width=None, n_samples,
cls=Distribution, **kwargs):
"""
Create a uniform distribution from the lower and upper bounds.
Note that this function requires keywords to be explicit, and requires
either ``lower``/``upper`` or ``center``/``width``.
Parameters
----------
lower : array-like
The lower edge of this distribution. If a `~astropy.units.Quantity`, the
distribution will have the same units as ``lower``.
upper : array-like
The upper edge of this distribution. Must match shape and if a
`~astropy.units.Quantity` must have compatible units with ``lower``.
center : array-like
The center value of the distribution. Cannot be provided at the same
time as ``lower``/``upper``.
width : array-like
The width of the distribution. Must have the same shape and compatible
units with ``center`` (if any).
n_samples : int
The number of Monte Carlo samples to use with this distribution
cls : class
The class to use to create this distribution. Typically a
`Distribution` subclass.
Remaining keywords are passed into the constructor of the ``cls``
Returns
-------
distr : `~astropy.uncertainty.Distribution` or object
The sampled uniform distribution.
The type will be the same as the parameter ``cls``.
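Examples
--------
A minimal usage sketch using the ``center``/``width`` form (values are
purely illustrative, and the drawn samples are random):
>>> from astropy.uncertainty import uniform
>>> distr = uniform(center=2., width=1., n_samples=500)
>>> distr.n_samples
500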
"""
if center is None and width is None:
lower = np.asanyarray(lower)
upper = np.asanyarray(upper)
if lower.shape != upper.shape:
raise ValueError('lower and upper must have consistent shapes')
elif upper is None and lower is None:
center = np.asanyarray(center)
width = np.asanyarray(width)
lower = center - width/2
upper = center + width/2
else:
raise ValueError('either upper/lower or center/width must be given '
'to uniform - other combinations are not valid')
newshape = lower.shape + (n_samples,)
if lower.shape == tuple() and upper.shape == tuple():
width = upper - lower # scalar
else:
width = (upper - lower)[:, np.newaxis]
lower = lower[:, np.newaxis]
samples = lower + width * np.random.uniform(size=newshape)
return cls(samples, **kwargs)
|
05b8cfa95cb2804190faa3deeda92a1bd9cc650cc49f6db0266c1448e2a9f41d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This sub-package contains classes and functions for creating distributions that
work similarly to `~astropy.units.Quantity` or array objects, but can propagate
uncertainties.
"""
from .core import *
from .distributions import *
|
01d4661cd76d3b63cbe4636614807a4aea16a680e9d773f2462dfeb3115720b1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
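As a minimal sketch of that composition (mirroring how `SLSQPLSQFitter` is
defined later in this module; ``ExampleSLSQPFitter`` is an illustrative name,
and a concrete ``__call__`` must be provided because the base ``__call__`` is
abstract)::
    from astropy.modeling.fitting import Fitter
    from astropy.modeling.optimizers import SLSQP
    from astropy.modeling.statistic import leastsquare
    class ExampleSLSQPFitter(Fitter):
        def __init__(self):
            super().__init__(optimizer=SLSQP, statistic=leastsquare)
            self.fit_info = {}
        def __call__(self, model, x, y, z=None, weights=None, **kwargs):
            ...  # run self._opt_method on self.objective_function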
"""
# pylint: disable=invalid-name
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
from importlib.metadata import entry_points
import numpy as np
from astropy.units import Quantity
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyUserWarning
from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex
from .spline import ( # noqa: F401
SplineExactKnotsFitter, SplineInterpolateFitter, SplineSmoothingFitter, SplineSplrepFitter)
from .statistic import leastsquare
from .utils import _combine_equivalency_dict, poly_map_domain
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'TRFLSQFitter',
'DogBoxLSQFitter', 'LMLSQFitter',
'FittingWithOutlierRemoval', 'SLSQPLSQFitter', 'SimplexLSQFitter',
'JointFitter', 'Fitter', 'ModelLinearityError', "ModelsError"]
# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]
class NonFiniteValueError(RuntimeError):
"""
Error raised when the fitter's objective function encounters a non-finite value.
"""
class Covariance():
"""Class for covariance matrix calculated by fitter. """
def __init__(self, cov_matrix, param_names):
self.cov_matrix = cov_matrix
self.param_names = param_names
def pprint(self, max_lines, round_val):
# Print and label lower triangle of covariance matrix
# Print rows for params up to `max_lines`, round floats to 'round_val'
longest_name = max(len(x) for x in self.param_names)
ret_str = 'parameter variances / covariances \n'
fstring = f'{"": <{longest_name}}| {{0}}\n'
for i, row in enumerate(self.cov_matrix):
if i <= max_lines-1:
param = self.param_names[i]
ret_str += (fstring.replace(' '*len(param), param, 1)
.format(repr(np.round(row[:i+1], round_val))[7:-2]))
else:
ret_str += '...'
return(ret_str.rstrip())
def __repr__(self):
return(self.pprint(max_lines=10, round_val=3))
def __getitem__(self, params):
# index covariance matrix by parameter names or indices
if len(params) != 2:
raise ValueError('Covariance must be indexed by two values.')
if all(isinstance(item, str) for item in params):
i1, i2 = self.param_names.index(params[0]), self.param_names.index(params[1])
elif all(isinstance(item, int) for item in params):
i1, i2 = params
else:
raise TypeError('Covariance can be indexed by two parameter names or integer indices.')
return(self.cov_matrix[i1][i2])
class StandardDeviations():
""" Class for fitting uncertainties."""
def __init__(self, cov_matrix, param_names):
self.param_names = param_names
self.stds = self._calc_stds(cov_matrix)
def _calc_stds(self, cov_matrix):
# sometimes scipy lstsq returns nonsensical negative values in the
# diagonal of the cov_x it computes.
stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)]
return stds
def pprint(self, max_lines, round_val):
longest_name = max(len(x) for x in self.param_names)
ret_str = 'standard deviations\n'
for i, std in enumerate(self.stds):
if i <= max_lines-1:
param = self.param_names[i]
ret_str += (f"{param}{' ' * (longest_name - len(param))}| "
f"{np.round(std, round_val)}\n")
else:
ret_str += '...'
return(ret_str.rstrip())
def __repr__(self):
return(self.pprint(max_lines=10, round_val=3))
def __getitem__(self, param):
if isinstance(param, str):
i = self.param_names.index(param)
elif isinstance(param, int):
i = param
else:
raise TypeError('Standard deviation can be indexed by parameter name or integer.')
return(self.stds[i])
class ModelsError(Exception):
"""Base class for model exceptions"""
class ModelLinearityError(ModelsError):
""" Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
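A minimal sketch of how the decorator is applied (mirroring the fitter
classes defined later in this module; ``ExampleFitter`` is an illustrative
name)::
    class ExampleFitter(metaclass=_FitterMeta):
        supported_constraints = []
        @fitter_unit_support
        def __call__(self, model, x, y, z=None, **kwargs):
            ...  # fit here using plain, unit-stripped arrays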
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop('equivalencies', None)
data_has_units = (isinstance(x, Quantity) or
isinstance(y, Quantity) or
isinstance(z, Quantity))
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(model.input_units[model.inputs[0]],
equivalencies=input_units_equivalencies[model.inputs[0]])
if isinstance(y, Quantity) and z is not None:
y = y.to(model.input_units[model.inputs[1]],
equivalencies=input_units_equivalencies[model.inputs[1]])
# Create a dictionary mapping the real model inputs and outputs
# names to the data. This remapping of names must be done here, after
# the input data is converted to the correct units.
rename_data = {model.inputs[0]: x}
if z is not None:
rename_data[model.outputs[0]] = z
rename_data[model.inputs[1]] = y
else:
rename_data[model.outputs[0]] = y
rename_data['z'] = None
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
# input units (to make sure that initial guesses on the parameters
# are in the right unit system)
model = model.without_units_for_data(**rename_data)
if isinstance(model, tuple):
rename_data['_left_kwargs'] = model[1]
rename_data['_right_kwargs'] = model[2]
model = model[0]
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(**rename_data)
return model_new
else:
raise NotImplementedError("This model does not support being "
"fit to data with units.")
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
"""
supported_constraints = []
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific for
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@staticmethod
def _add_fitting_uncertainties(*args):
"""
When available, calculate and sets the parameter covariance matrix
(model.cov_matrix) and standard deviations (model.stds).
"""
return None
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
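Examples
--------
A minimal sketch of fitting a straight line with the ``Linear1D`` model
from `astropy.modeling.models` (data values are purely illustrative):
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> x = np.linspace(0, 1, 10)
>>> y = 2 * x + 1
>>> fitter = fitting.LinearLSQFitter()
>>> fitted_line = fitter(models.Linear1D(), x, y)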
"""
supported_constraints = ['fixed']
supports_masked_input = True
def __init__(self, calc_uncertainties=False):
self.fit_info = {'residuals': None,
'rank': None,
'singular_values': None,
'params': None
}
self._calc_uncertainties = calc_uncertainties
@staticmethod
def _is_invertible(m):
"""Check if inverse of matrix can be obtained."""
if m.shape[0] != m.shape[1]:
return False
if np.linalg.matrix_rank(m) < m.shape[0]:
return False
return True
def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None,
resids=None):
"""
Calculate the parameter covariance matrix and standard deviations
and set `cov_matrix` and `stds` attributes.
"""
x_dot_x_prime = np.dot(a.T, a)
masked = hasattr(y, 'mask')
# check if invertible. if not, can't calc covariance.
if not self._is_invertible(x_dot_x_prime):
return(model)
inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime)
if z is None: # 1D models
if len(model) == 1: # single model
mask = None
if masked:
mask = y.mask
xx = np.ma.array(x, mask=mask)
RSS = [(1/(xx.count()-n_coeff)) * resids]
if len(model) > 1: # model sets
RSS = [] # collect sum residuals squared for each model in set
for j in range(len(model)):
mask = None
if masked:
mask = y.mask[..., j].flatten()
xx = np.ma.array(x, mask=mask)
eval_y = model(xx, model_set_axis=False)
eval_y = np.rollaxis(eval_y, model.model_set_axis)[j]
RSS.append((1/(xx.count()-n_coeff)) * np.sum((y[..., j] - eval_y)**2))
else: # 2D model
if len(model) == 1:
mask = None
if masked:
warnings.warn('Calculation of fitting uncertainties '
'for 2D models with masked values not '
'currently supported.\n',
AstropyUserWarning)
return
xx, _ = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask)
# len(xx) instead of xx.count. this will break if values are masked?
RSS = [(1/(len(xx)-n_coeff)) * resids]
else:
RSS = []
for j in range(len(model)):
eval_z = model(x, y, model_set_axis=False)
mask = None # need to figure out how to deal w/ masking here.
if model.model_set_axis == 1:
# model_set_axis passed when evaluating only refers to input shapes
# so output must be reshaped for model_set_axis=1.
eval_z = np.rollaxis(eval_z, 1)
eval_z = eval_z[j]
RSS.append([(1/(len(x)-n_coeff)) * np.sum((z[j] - eval_z)**2)])
covs = [inv_x_dot_x_prime * r for r in RSS]
free_param_names = [x for x in model.fixed if (model.fixed[x] is False)
and (model.tied[x] is False)]
if len(covs) == 1:
model.cov_matrix = Covariance(covs[0], model.param_names)
model.stds = StandardDeviations(covs[0], free_param_names)
else:
model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs]
model.stds = [StandardDeviations(cov, free_param_names) for cov in covs]
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, 'domain') and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, 'window') and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, 'x_domain') and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, 'y_domain') and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, 'x_window') and model.x_window is None:
model.x_window = [-1., 1.]
if hasattr(model, 'y_window') and model.y_window is None:
model.y_window = [-1., 1.]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like, optional
Input coordinates.
If the dependent (``y`` or ``z``) coordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
coordinate grids differ.
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError('Model is not linear in parameters, '
'linear fit methods should not be used.')
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
model_copy.sync_constraints = False
_, fitparam_indices, _ = model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(x, y, z, n_models=len(model_copy),
model_set_axis=model_copy.model_set_axis)
has_fixed = any(model_copy.fixed.values())
# This is also done by _convert_inputs, but we need it here to allow
# checking the array dimensionality before that gets called:
if weights is not None:
weights = np.asarray(weights, dtype=float)
if has_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [idx for idx in
range(len(model_copy.param_names))
if idx not in fitparam_indices]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray([getattr(model_copy,
model_copy.param_names[idx]).value
for idx in fixparam_indices])
if len(farg) == 2:
x, y = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, weights = _convert_input(
x, weights,
n_models=len(model_copy) if weights.ndim == y.ndim else 1,
model_set_axis=model_copy.model_set_axis
)
# map domain into window
if hasattr(model_copy, 'domain'):
x = self._map_domain_window(model_copy, x)
if has_fixed:
lhs = np.asarray(self._deriv_with_constraints(model_copy,
fitparam_indices,
x=x))
fixderivs = self._deriv_with_constraints(model_copy, fixparam_indices, x=x)
else:
lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, _, weights = _convert_input(
x, y, weights,
n_models=len(model_copy) if weights.ndim == z.ndim else 1,
model_set_axis=model_copy.model_set_axis
)
# map domain into window
if hasattr(model_copy, 'x_domain'):
x, y = self._map_domain_window(model_copy, x, y)
if has_fixed:
lhs = np.asarray(self._deriv_with_constraints(model_copy,
fitparam_indices, x=x, y=y))
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices,
x=x, y=y)
else:
lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
if weights is not None:
# Same for weights
if weights.ndim > 2:
# Separate 2D weights for each model:
weights = np.rollaxis(weights, model_axis, weights.ndim)
weights = weights.reshape(-1, weights.shape[-1])
elif weights.ndim == z.ndim:
# Separate, flattened weights for each model:
weights = weights.T if model_axis == 0 else weights
else:
# Common weights for all the models:
weights = weights.flatten()
else:
rhs = z.flatten()
if weights is not None:
weights = weights.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if np.asanyarray(lhs).ndim > 2:
raise ValueError(f"{type(model_copy).__name__} gives unsupported >2D "
"derivative matrix for this x/y")
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if has_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input coordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
if rhs.ndim == 2:
if weights.shape == rhs.shape:
# separate weights for multiple models case: broadcast
# lhs to have more dimension (for each model)
lhs = lhs[..., np.newaxis] * weights[:, np.newaxis]
rhs = rhs * weights
else:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original
# dependent variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if weights is not None and not masked and np.any(np.isnan(lhs)):
raise ValueError('Found NaNs in the coefficient matrix, which '
'should not happen and would crash the lapack '
'routine. Maybe check that weights are not null.')
a = None  # needed for calculating covariance
if ((masked and len(model_copy) > 1) or
(weights is not None and weights.ndim > 1)):
# Separate masks or weights for multiple models case: Numpy's
# lstsq supports multiple dimensions only for rhs, so we need to
# loop manually on the models. This may be fixed in the future
# with https://github.com/numpy/numpy/pull/15777.
# Initialize empty array of coefficients and populate it one model
# at a time. The shape matches the number of coefficients from the
# Vandermonde matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype)
# Arrange the lhs as a stack of 2D matrices that we can iterate
# over to get the correctly-orientated lhs for each model:
if lhs.ndim > 2:
lhs_stack = np.rollaxis(lhs, -1, 0)
else:
lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask if masked else slice(None)
model_lhs = model_lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
a = model_lhs
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs,
model_rhs, rcond)
model_lacoef[:] = t_coef.T
else:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
a = lhs[good]
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good],
rhs[good], rcond)
self.fit_info['residuals'] = resids
self.fit_info['rank'] = rank
self.fit_info['singular_values'] = sval
lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl
self.fit_info['params'] = lacoef
fitter_to_model_params(model_copy, lacoef.flatten())
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if (hasattr(model_copy, '_order') and
len(model_copy) == 1 and
not has_fixed and
rank != model_copy._order):
warnings.warn("The fit may be poorly conditioned\n",
AstropyUserWarning)
# calculate and set covariance matrix and standard devs. on model
if self._calc_uncertainties:
if len(y) > len(lacoef):
self._add_fitting_uncertainties(model_copy, a*scl,
len(lacoef), x, y, z, resids)
model_copy.sync_constraints = True
return model_copy
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a maximum number of iterations ``niter``, outliers are
removed and fitting is performed for each iteration, until no new outliers
are found or ``niter`` is reached.
Parameters
----------
fitter : `Fitter`
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : callable
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int, optional
Maximum number of iterations.
outlier_kwargs : dict, optional
Keyword arguments for outlier_func.
Attributes
----------
fit_info : dict
The ``fit_info`` (if any) from the last iteration of the wrapped
``fitter`` during the most recent fit. An entry is also added with the
keyword ``niter`` that records the actual number of fitting iterations
performed (as opposed to the user-specified maximum).
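Examples
--------
A minimal sketch combining a linear fitter with ``sigma_clip`` from
`astropy.stats` (data values are purely illustrative):
>>> import numpy as np
>>> from astropy.stats import sigma_clip
>>> from astropy.modeling import models, fitting
>>> x = np.linspace(0, 1, 20)
>>> y = 2 * x + 1
>>> y[5] = 100.  # an obvious outlier
>>> fitter = fitting.FittingWithOutlierRemoval(
...     fitting.LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0)
>>> fitted_line, mask = fitter(models.Linear1D(), x, y)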
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
self.fit_info = {'niter': None}
def __str__(self):
return (f"Fitter: {self.fitter.__class__.__name__}\n"
f"Outlier function: {self.outlier_func.__name__}\n"
f"Num. of iterations: {self.niter}\n"
f"Outlier func. args.: {self.outlier_kwargs}")
def __repr__(self):
return (f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, "
f"outlier_func: {self.outlier_func.__name__},"
f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})")
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like, optional
Data measurements (2D case).
weights : array-like, optional
Weights to be passed to the fitter.
kwargs : dict, optional
Keyword arguments to be passed to the fitter.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behavior and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y coordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if (not hasattr(self.fitter, 'supports_masked_input') or
self.fitter.supports_masked_input is not True):
raise ValueError(f"{type(self.fitter).__name__} cannot fit model sets with masked "
"values")
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input coordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = (x, )
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if 'axis' not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs['axis'] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
last_n_masked = filtered_data.mask.sum()
n = 0 # (allow recording no. of iterations when 0)
# Perform the iterative fitting:
for n in range(1, self.niter + 1):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop('axis', None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask,
model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(data_T, mask_T,
model_vals_T):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn('outlier_func did not accept axis argument; '
'reverted to slow loop over models.',
AstropyUserWarning)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights, **kwargs)
else:
fitted_model = self.fitter(fitted_model, *coords,
filtered_data,
weights=filtered_weights, **kwargs)
# Stop iteration if the masked points are no longer changing (with
# cumulative rejection we only need to compare how many there are):
this_n_masked = filtered_data.mask.sum() # (minimal overhead)
if this_n_masked == last_n_masked:
break
last_n_masked = this_n_masked
self.fit_info = {'niter': n}
self.fit_info.update(getattr(self.fitter, 'fit_info', {}))
return fitted_model, filtered_data.mask
class _NonLinearLSQFitter(metaclass=_FitterMeta):
"""
Base class for Non-Linear least-squares fitters
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If the set parameter bounds for a model will be enforced for each given
parameter while fitting via a simple min/max condition.
Default: True
"""
supported_constraints = ['fixed', 'tied', 'bounds']
"""
The constraint types supported by this fitter type.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=True):
self.fit_info = None
self._calc_uncertainties = calc_uncertainties
self._use_min_max_bounds = use_min_max_bounds
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
fitter_to_model_params(model, fps, self._use_min_max_bounds)
meas = args[-1]
if weights is None:
value = np.ravel(model(*args[2: -1]) - meas)
else:
value = np.ravel(weights * (model(*args[2: -1]) - meas))
if not np.all(np.isfinite(value)):
raise NonFiniteValueError("Objective function has encountered a non-finite value, "
"this will cause the fit to fail!\n"
"Please remove non-finite values from your input data before fitting to avoid this error.")
return value
@staticmethod
def _add_fitting_uncertainties(model, cov_matrix):
"""
Set ``cov_matrix`` and ``stds`` attributes on model with parameter
covariance matrix returned by ``optimize.leastsq``.
"""
free_param_names = [x for x in model.fixed if (model.fixed[x] is False)
and (model.tied[x] is False)]
model.cov_matrix = Covariance(cov_matrix, free_param_names)
model.stds = StandardDeviations(cov_matrix, free_param_names)
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)])
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars],
True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
try:
return np.array([np.ravel(_) for _ in np.array(weights) *
np.array(model.fit_deriv(x, *params))])
except ValueError:
return np.array([np.ravel(_) for _ in np.array(weights) *
np.moveaxis(
np.array(model.fit_deriv(x, *params)),
-1, 0)]).transpose()
else:
if not model.col_fit_deriv:
return [np.ravel(_) for _ in
(np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
return [np.ravel(_) for _ in weights * np.array(model.fit_deriv(x, y, *params))]
def _compute_param_cov(self, model, y, init_values, cov_x, fitparams, farg):
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
dof = len(y) - len(init_values)
self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
else:
self.fit_info['param_cov'] = None
if self._calc_uncertainties is True:
if self.fit_info['param_cov'] is not None:
self._add_fitting_uncertainties(model,
self.fit_info['param_cov'])
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
return None, None, None
def _filter_non_finite(self, x, y, z=None):
"""
Filter out non-finite values in x, y, z.
Returns
-------
x, y, z : ndarrays
x, y, and z with non-finite values filtered out.
"""
MESSAGE = "Non-Finite input data has been removed by the fitter."
if z is None:
mask = np.isfinite(y)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
return x[mask], y[mask], None
else:
mask = np.isfinite(z)
if not np.all(mask):
warnings.warn(MESSAGE, AstropyUserWarning)
return x[mask], y[mask], z[mask]
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None,
maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS, estimate_jacobian=False,
filter_non_finite=False):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
filter_non_finite : bool, optional
Whether or not to filter data with non-finite values. Default is False
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self.supported_constraints)
model_copy.sync_constraints = False
if filter_non_finite:
x, y, z = self._filter_non_finite(x, y, z)
farg = (model_copy, weights, ) + _convert_input(x, y, z)
init_values, fitparams, cov_x = self._run_fitter(model_copy, farg,
maxiter, acc, epsilon, estimate_jacobian)
self._compute_param_cov(model_copy, y, init_values, cov_x, fitparams, farg)
model_copy.sync_constraints = True
return model_copy
class LevMarLSQFitter(_NonLinearLSQFitter):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
Additionally, one extra element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
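Examples
--------
A minimal sketch of a non-linear fit with a ``Gaussian1D`` model (data
values are purely illustrative; the fit requires `scipy` to be installed):
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> x = np.linspace(-5., 5., 200)
>>> y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
>>> fitter = fitting.LevMarLSQFitter()
>>> init_model = models.Gaussian1D(amplitude=1., mean=0., stddev=1.)
>>> fitted_model = fitter(init_model, x, y)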
"""
def __init__(self, calc_uncertainties=False):
super().__init__(calc_uncertainties)
self.fit_info = {'nfev': None,
'fvec': None,
'fjac': None,
'ipvt': None,
'qtf': None,
'message': None,
'ierr': None,
'param_jac': None,
'param_cov': None}
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
if model.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _, _ = model_to_fit_params(model)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function, init_values, args=farg, Dfun=dfunc,
col_deriv=model.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
xtol=acc, full_output=True)
fitter_to_model_params(model, fitparams)
self.fit_info.update(dinfo)
self.fit_info['cov_x'] = cov_x
self.fit_info['message'] = mess
self.fit_info['ierr'] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
return init_values, fitparams, cov_x
class _NLLSQFitter(_NonLinearLSQFitter):
"""
Wrapper class for `scipy.optimize.least_squares` method, which provides:
- Trust Region Reflective
- dogbox
- Levenberg-Marquardt
algorithms using the least squares statistic.
Parameters
----------
method : str
‘trf’ : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
‘dogbox’ : dogleg algorithm with rectangular trust regions, typical
use case is small problems with bounds. Not recommended for
problems with rank-deficient Jacobian.
‘lm’ : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn’t handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If the set parameter bounds for a model will be enforced for each given
parameter while fitting via a simple min/max condition. A True setting
will replicate how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__(calc_uncertainties, use_min_max_bounds)
self._method = method
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
from scipy.linalg import svd
if model.fit_deriv is None or estimate_jacobian:
dfunc = '2-point'
else:
def _dfunc(params, model, weights, x, y, z=None):
if model.col_fit_deriv:
return np.transpose(self._wrap_deriv(params, model, weights, x, y, z))
else:
return self._wrap_deriv(params, model, weights, x, y, z)
dfunc = _dfunc
init_values, _, bounds = model_to_fit_params(model)
# Note: if use_min_max_bounds is True, we default to enforcing bounds
# using the old method employed by LevMarLSQFitter. This is different
# from the method that optimize.least_squares employs to enforce bounds,
# so we override the bounds being passed to optimize.least_squares so
# that it will not enforce any bounding.
if self._use_min_max_bounds:
bounds = (-np.inf, np.inf)
self.fit_info = optimize.least_squares(
self.objective_function, init_values, args=farg, jac=dfunc,
max_nfev=maxiter, diff_step=np.sqrt(epsilon), xtol=acc,
method=self._method, bounds=bounds
)
# Adapted from ~scipy.optimize.minpack, see:
# https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(self.fit_info.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
cov_x = np.dot(VT.T / s**2, VT)
fitter_to_model_params(model, self.fit_info.x, False)
if not self.fit_info.success:
warnings.warn("The fit may be unsuccessful; check: \n"
f" {self.fit_info.message}",
AstropyUserWarning)
return init_values, self.fit_info.x, cov_x
class TRFLSQFitter(_NLLSQFitter):
"""
Trust Region Reflective algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If the set parameter bounds for a model will be enforced for each given
parameter while fitting via a simple min/max condition. A True setting
will replicate how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__('trf', calc_uncertainties, use_min_max_bounds)
class DogBoxLSQFitter(_NLLSQFitter):
"""
DogBox algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
use_min_max_bounds : bool
If the set parameter bounds for a model will be enforced for each given
parameter while fitting via a simple min/max condition. A True setting
will replicate how LevMarLSQFitter enforces bounds.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__('dogbox', calc_uncertainties, use_min_max_bounds)
class LMLSQFitter(_NLLSQFitter):
"""
`scipy.optimize.least_squares` Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
If the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info :
A `scipy.optimize.OptimizeResult` instance which contains all of
the most recent fit information
"""
def __init__(self, calc_uncertainties=False):
super().__init__('lm', calc_uncertainties, True)
class SLSQPLSQFitter(Fitter):
"""
Sequential Least Squares Programming (SLSQP) optimization algorithm and
least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
Notes
-----
See also the `~astropy.modeling.optimizers.SLSQP` optimizer.
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
`ModelLinearityError`
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model,
self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : dict
a dictionary mapping each model to the list of names of the parameters
it shares (fits jointly) with the other models
initvals : list
a list of initial values
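Examples
--------
A minimal sketch fitting two ``Gaussian1D`` models that share their mean
(data, models, and the initial value are purely illustrative; the fit
itself requires `scipy`):
>>> import numpy as np
>>> from astropy.modeling import models
>>> from astropy.modeling.fitting import JointFitter
>>> g1 = models.Gaussian1D(amplitude=1., mean=0., stddev=1.)
>>> g2 = models.Gaussian1D(amplitude=2., mean=0., stddev=2.)
>>> fitter = JointFitter([g1, g2],
...                      jointparameters={g1: ['mean'], g2: ['mean']},
...                      initvals=[0.])
>>> x = np.linspace(-5., 5., 200)
>>> y1, y2 = g1(x), g2(x)
>>> fitter(x, y1, x, y2)  # doctest: +SKIP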
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self.model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = model.parameters.tolist()
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]['slice']
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
the fitted parameters - result of one iteration of the
fitting algorithm
args : tuple
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[:model.n_inputs + 1]
del lstsqargs[:model.n_inputs + 1]
# separate each model separately fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError(f"Expected >1 models, {len(self.models)} is given")
if len(self.jointparams.keys()) < 2:
raise TypeError("At least two parameters are expected, "
f"{len(self.jointparams.keys())} is given")
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError(f"{len(self.jointparams[j])} parameter(s) "
f"provided but {len(self.initvals)} expected")
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError(f"Expected {reduce(lambda x, y: x + 1 + y + 1, self.modeldims)} "
f"coordinates in args but {len(args)} provided")
self.fitparams[:], _ = optimize.leastsq(self.objective_function,
self.fitparams, args=args)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
data_ndim, data_shape = z.ndim, z.shape
else:
data_ndim, data_shape = y.ndim, y.shape
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1 or data_ndim > x.ndim:
if (model_set_axis or 0) >= data_ndim:
raise ValueError("model_set_axis out of range")
if data_shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y or z array) is expected to equal "
"the number of parameter sets"
)
if z is None:
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
data_shape = y.shape[:-1]
else:
# Shape of z excluding model_set_axis
data_shape = (z.shape[:model_set_axis] +
z.shape[model_set_axis + 1:])
if z is None:
if data_shape != x.shape:
raise ValueError("x and y should have the same shape")
farg = (x, y)
else:
if not (x.shape == y.shape == data_shape):
raise ValueError("x, y and z should have the same shape")
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def fitter_to_model_params(model, fps, use_min_max_bounds=True):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
Parameters
----------
model :
The model being fit
fps :
The fit parameter values to be assigned
    use_min_max_bounds : bool
        If True, the parameter bounds set on the model will be enforced on
        each parameter with bounds.
        Default: True
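    Examples
    --------
    An illustrative sketch (the values are arbitrary), pushing a flat array
    of fitted values back onto a model with one fixed parameter:
    >>> from astropy.modeling import models
    >>> g = models.Gaussian1D(1, 0, 1, fixed={'mean': True})
    >>> fitter_to_model_params(g, [5.0, 2.0])  # amplitude, stddev
    >>> g.parameters
    array([5., 0., 2.])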
"""
_, fit_param_indices, _ = model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
parameters = model.parameters
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]['slice']
shape = param_metrics[name]['shape']
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset:offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None) and use_min_max_bounds:
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
parameters[slice_] = values
offset += size
# Update model parameters before calling ``tied`` constraints.
model._array_to_parameters()
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
    # better to change this at some point).
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]['slice']
# To handle multiple tied constraints, model parameters
# need to be updated after each iteration.
parameters[slice_] = value
model._array_to_parameters()
@deprecated('5.1', 'private method: _fitter_to_model_params has been made public now')
def _fitter_to_model_params(model, fps):
return fitter_to_model_params(model, fps)
def model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
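    Examples
    --------
    An illustrative sketch with one fixed parameter:
    >>> from astropy.modeling import models
    >>> g = models.Gaussian1D(1, 0, 1, fixed={'mean': True})
    >>> params, indices, bounds = model_to_fit_params(g)
    >>> params
    array([1., 1.])
    >>> indices
    [0, 2]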
"""
fitparam_indices = list(range(len(model.param_names)))
model_params = model.parameters
model_bounds = list(model.bounds.values())
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model_params)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]['slice']
del params[slice_]
del model_bounds[slice_]
del fitparam_indices[idx]
model_params = np.array(params)
for idx, bound in enumerate(model_bounds):
if bound[0] is None:
lower = -np.inf
else:
lower = bound[0]
if bound[1] is None:
upper = np.inf
else:
upper = bound[1]
model_bounds[idx] = (lower, upper)
model_bounds = tuple(zip(*model_bounds))
return model_params, fitparam_indices, model_bounds
@deprecated('5.1', 'private method: _model_to_fit_params has been made public now')
def _model_to_fit_params(model):
return model_to_fit_params(model)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = 'Optimizer cannot handle {0} constraints.'
if (any(model.fixed.values()) and
'fixed' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('fixed parameter'))
if any(model.tied.values()) and 'tied' not in supported_constraints:
raise UnsupportedConstraintError(
message.format('tied parameter'))
if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
'bounds' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('bound parameter'))
if model.eqcons and 'eqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('equality'))
if model.ineqcons and 'ineqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn('Model is linear in parameters; '
'consider using linear fitting methods.',
AstropyUserWarning)
elif len(model) != 1:
        # for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit "
"one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
    This provides a means of inserting a fitting routine without the
    requirement that it be merged into astropy's core.
Parameters
----------
entry_points : list of `~importlib.metadata.EntryPoint`
        Entry points are objects which encapsulate importable objects and
        are defined when a package is installed.
Notes
-----
An explanation of entry points can be found `here
<http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
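    Examples
    --------
    An illustrative sketch (``MyFitter`` and ``mypackage`` are hypothetical);
    a third-party package would typically declare in its ``setup.cfg``::
        [options.entry_points]
        astropy.modeling =
            MyFitter = mypackage.fitters:MyFitter
    after which this function exposes ``MyFitter`` in the
    `astropy.modeling.fitting` namespace.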
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(AstropyUserWarning(
f'{type(e).__name__} error occurred in entry point {name}.'))
else:
if not inspect.isclass(entry_point):
warnings.warn(AstropyUserWarning(
f'Modeling entry point {name} expected to be a Class.'))
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(AstropyUserWarning(
f"Modeling entry point {name} expected to extend "
"astropy.modeling.Fitter"))
def _populate_ep():
# TODO: Exclusively use select when Python minversion is 3.10
ep = entry_points()
if hasattr(ep, 'select'):
populate_entry_points(ep.select(group='astropy.modeling'))
else:
populate_entry_points(ep.get('astropy.modeling', []))
_populate_ep()
|
7947fa9d7997b70f7b58046a766adaa2f13792dabc39b33fa276fd0642875ebf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
# pylint: disable=invalid-name, protected-access, redefined-outer-name
import abc
import copy
import functools
import inspect
import itertools
import operator
import types
from collections import defaultdict, deque
from inspect import signature
from itertools import chain
import numpy as np
from astropy.nddata.utils import add_array, extract_array
from astropy.table import Table
from astropy.units import Quantity, UnitsError, dimensionless_unscaled
from astropy.units.utils import quantity_asanyarray
from astropy.utils import (
IncompatibleShapeError, check_broadcast, find_current_module, indent, isiterable, metadata,
sharedmethod)
from astropy.utils.codegen import make_function_with_signature
from .bounding_box import CompoundBoundingBox, ModelBoundingBox
from .parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline
from .utils import (
_combine_equivalency_dict, _ConstraintsDict, _SpecialOperatorsDict, combine_labels,
get_inputs_and_params, make_binary_operator_eval)
__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',
'CompoundModel', 'fix_inputs', 'custom_model', 'ModelDefinitionError',
'bind_bounding_box', 'bind_compound_bounding_box']
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
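    Examples
    --------
    A minimal sketch of what the returned function does:
    >>> from astropy.modeling.models import Gaussian1D
    >>> plus = _model_oper('+')
    >>> isinstance(plus(Gaussian1D(), Gaussian1D()), CompoundModel)
    True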
"""
return lambda left, right: CompoundModel(oper, left, right, **kwargs)
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions."""
class _ModelMeta(abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
def __new__(mcls, name, bases, members, **kwds):
# See the docstring for _is_dynamic above
if '_is_dynamic' not in members:
members['_is_dynamic'] = mcls._is_dynamic
opermethods = [
('__add__', _model_oper('+')),
('__sub__', _model_oper('-')),
('__mul__', _model_oper('*')),
('__truediv__', _model_oper('/')),
('__pow__', _model_oper('**')),
('__or__', _model_oper('|')),
('__and__', _model_oper('&')),
('_fix_inputs', _model_oper('fix_inputs'))
]
members['_parameters_'] = {k: v for k, v in members.items()
if isinstance(v, Parameter)}
for opermethod, opercall in opermethods:
members[opermethod] = opercall
cls = super().__new__(mcls, name, bases, members, **kwds)
param_names = list(members['_parameters_'])
# Need to walk each base MRO to collect all parameter names
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
# Preserve order of definitions
param_names = list(tbase._parameters_) + param_names
# Remove duplicates (arising from redefinition in subclass).
param_names = list(dict.fromkeys(param_names))
if cls._parameters_:
if hasattr(cls, '_param_names'):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(param_names)
else:
cls.param_names = tuple(param_names)
return cls
def __init__(cls, name, bases, members, **kwds):
super().__init__(name, bases, members, **kwds)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
pdict = {}
for base in bases:
for tbase in base.__mro__:
if issubclass(tbase, Model):
for parname, val in cls._parameters_.items():
pdict[parname] = val
cls._handle_special_methods(members, pdict)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith('_abc_'):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ('__init__', '__call__'):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
internal implementation detail (i.e. begins with '_').
"""
return not (cls.__name__.startswith('_') or inspect.isabstract(cls))
def rename(cls, name=None, inputs=None, outputs=None):
"""
Creates a copy of this model class with a new name, inputs or outputs.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class 'astropy.modeling.core.SkyRotation'>
Name: SkyRotation (Rotation2D)
N_inputs: 2
N_outputs: 2
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
if name is None:
name = cls.name
if inputs is None:
inputs = cls.inputs
else:
if not isinstance(inputs, tuple):
raise TypeError("Expected 'inputs' to be a tuple of strings.")
elif len(inputs) != len(cls.inputs):
raise ValueError(f'{cls.name} expects {len(cls.inputs)} inputs')
if outputs is None:
outputs = cls.outputs
else:
if not isinstance(outputs, tuple):
raise TypeError("Expected 'outputs' to be a tuple of strings.")
elif len(outputs) != len(cls.outputs):
raise ValueError(f'{cls.name} expects {len(cls.outputs)} outputs')
new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs})
new_cls.__module__ = modname
new_cls.__qualname__ = name
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get('inverse')
if inverse is None or cls.__bases__[0] is object:
            # The latter clause is to prevent the below code from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get('bounding_box')
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = ModelBoundingBox.validate(cls, bounding_box, _preserve_ignore=True)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of ModelBoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
f"The bounding_box method for {cls.name} is not correctly "
"defined: If defined as a method all arguments to that "
"method (besides self) must be keyword arguments with "
"default values that can be used to compute a default "
"bounding box.")
kwargs.append((param.name, param.default))
__call__.__signature__ = sig
return type(f"{cls.name}ModelBoundingBox", (ModelBoundingBox,),
{'__call__': __call__})
def _handle_special_methods(cls, members, pdict):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, '__qualname__'):
wrapper.__qualname__ = f'{cls.__qualname__}.{wrapper.__name__}'
if ('__call__' not in members and 'n_inputs' in members and
isinstance(members['n_inputs'], int) and members['n_inputs'] > 0):
# Don't create a custom __call__ for classes that already have one
# explicitly defined (this includes the Model base class, and any
            # other classes that manually override __call__)
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
# When called, models can take two optional keyword arguments:
#
# * model_set_axis, which indicates (for multi-dimensional input)
# which axis is used to indicate different models
#
# * equivalencies, a dictionary of equivalencies to be applied to
# the input values, where each key should correspond to one of
# the inputs.
#
# The following code creates the __call__ function with these
# two keyword arguments.
args = ('self',)
kwargs = dict([('model_set_axis', None),
('with_bounding_box', False),
('fill_value', np.nan),
('equivalencies', None),
('inputs_map', None)])
new_call = make_function_with_signature(
__call__, args, kwargs, varargs='inputs', varkwargs='new_inputs')
# The following makes it look like __call__
# was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if ('__init__' not in members and not inspect.isabstract(cls) and
cls._parameters_):
# Build list of all parameters including inherited ones
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional
# arguments
if all(p.default is not None for p in pdict.values()):
args = ('self',)
kwargs = []
for param_name, param_val in pdict.items():
default = param_val.default
unit = param_val.unit
# If the unit was specified in the parameter but the
# default is not a Quantity, attach the unit to the
# default.
if unit is not None:
default = Quantity(default, unit, copy=False)
kwargs.append((param_name, default))
else:
args = ('self',) + tuple(pdict.keys())
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs='kwargs')
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
_fix_inputs = _model_oper('fix_inputs')
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif (inspect.isabstract(base) or
base.__name__.startswith('_')):
break
bases.append(base.name)
if bases:
return f"{cls.name} ({' -> '.join(bases)})"
return cls.name
try:
default_keywords = [
('Name', format_inheritance(cls)),
('N_inputs', cls.n_inputs),
('N_outputs', cls.n_outputs),
]
if cls.param_names:
default_keywords.append(('Fittable parameters',
cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append(f'{keyword}: {value}')
return '\n'.join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
sets. Also, ``model_set_axis=False`` can be used to tell that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
        `~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
List of functions of length n such that ``ieqcons[j](x0, *args) >=
        0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ('eqcons', 'ineqcons')
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
n_inputs = 0
"""The number of inputs."""
n_outputs = 0
""" The number of outputs."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
_has_inverse_bounding_box = False
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
# Covariance matrix can be set by fitter if available.
# If cov_matrix is available, then std will set as well
_cov_matrix = None
_stds = None
def __init_subclass__(cls, **kwargs):
super().__init_subclass__()
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
self._default_inputs_outputs()
if meta is not None:
self.meta = meta
self._name = name
# add parameters to instance level by walking MRO list
mro = self.__class__.__mro__
for cls in mro:
if issubclass(cls, Model):
for parname, val in cls._parameters_.items():
newpar = copy.deepcopy(val)
newpar.model = self
if parname not in self.__dict__:
self.__dict__[parname] = newpar
self._initialize_constraints(kwargs)
kwargs = self._initialize_setters(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_slices()
self._initialize_unit_support()
def _default_inputs_outputs(self):
if self.n_inputs == 1 and self.n_outputs == 1:
self._inputs = ("x",)
self._outputs = ("y",)
elif self.n_inputs == 2 and self.n_outputs == 1:
self._inputs = ("x", "y")
self._outputs = ("z",)
else:
try:
self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs))
self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs))
except TypeError:
# self.n_inputs and self.n_outputs are properties
# This is the case when subclasses of Model do not define
# ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``.
self._inputs = ()
self._outputs = ()
def _initialize_setters(self, kwargs):
"""
This exists to inject defaults for settable properties for models
originating from `custom_model`.
"""
if hasattr(self, '_settable_properties'):
setters = {name: kwargs.pop(name, default)
for name, default in self._settable_properties.items()}
for name, value in setters.items():
setattr(self, name, value)
return kwargs
@property
def inputs(self):
return self._inputs
@inputs.setter
def inputs(self, val):
if len(val) != self.n_inputs:
raise ValueError(f"Expected {self.n_inputs} number of inputs, got {len(val)}.")
self._inputs = val
self._initialize_unit_support()
@property
def outputs(self):
return self._outputs
@outputs.setter
def outputs(self, val):
if len(val) != self.n_outputs:
raise ValueError(f"Expected {self.n_outputs} number of outputs, got {len(val)}.")
self._outputs = val
@property
def n_inputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``inputs`` as class variables is removed.
if hasattr(self.__class__, 'n_inputs') and isinstance(self.__class__.n_inputs, property):
try:
return len(self.__class__.inputs)
except TypeError:
try:
return len(self.inputs)
except AttributeError:
return 0
return self.__class__.n_inputs
@property
def n_outputs(self):
# TODO: remove the code in the ``if`` block when support
# for models with ``outputs`` as class variables is removed.
if hasattr(self.__class__, 'n_outputs') and isinstance(self.__class__.n_outputs, property):
try:
return len(self.__class__.outputs)
except TypeError:
try:
return len(self.outputs)
except AttributeError:
return 0
return self.__class__.n_outputs
def _calculate_separability_matrix(self):
"""
This is a hook which customises the behavior of modeling.separable.
This allows complex subclasses to customise the separability matrix.
If it returns `NotImplemented` the default behavior is used.
"""
return NotImplemented
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {key: self._input_units_strict for
key in self.inputs}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {key: self._input_units_allow_dimensionless
for key in self.inputs}
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that parameter.
"""
val = self._input_units_strict
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
val = self._input_units_allow_dimensionless
if isinstance(val, bool):
return {key: val for key in self.inputs}
return dict(zip(self.inputs, val.values()))
@property
def uses_quantity(self):
"""
True if this model has been created with `~astropy.units.Quantity`
objects or if there are no parameters.
This can be used to determine if this model should be evaluated with
`~astropy.units.Quantity` or regular floats.
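        Examples
        --------
        For instance (illustrative values):
        >>> from astropy import units as u
        >>> from astropy.modeling.models import Gaussian1D
        >>> Gaussian1D(1, 0, 1).uses_quantity
        False
        >>> Gaussian1D(1 * u.Jy, 0 * u.m, 1 * u.m).uses_quantity
        True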
"""
pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
return (len(pisq) == 0) or any(pisq)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
@staticmethod
def _strip_ones(intup):
return tuple(item for item in intup if item != 1)
def __setattr__(self, attr, value):
if isinstance(self, CompoundModel):
param_names = self._param_names
param_names = self.param_names
if param_names is not None and attr in self.param_names:
param = self.__dict__[attr]
value = _tofloat(value)
if param._validator is not None:
param._validator(self, value)
# check consistency with previous shape and size
eshape = self._param_metrics[attr]['shape']
if eshape == ():
eshape = (1,)
vshape = np.array(value).shape
if vshape == ():
vshape = (1,)
esize = self._param_metrics[attr]['size']
if (np.size(value) != esize or
self._strip_ones(vshape) != self._strip_ones(eshape)):
raise InputParameterError(
f"Value for parameter {attr} does not match shape or size\n"
f"expected by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})")
if param.unit is None:
if isinstance(value, Quantity):
param._unit = value.unit
param.value = value.value
else:
param.value = value
else:
if not isinstance(value, Quantity):
raise UnitsError(f"The '{param.name}' parameter should be given as a"
" Quantity because it was originally "
"initialized as a Quantity")
param._unit = value.unit
param.value = value.value
else:
if attr in ['fittable', 'linear']:
self.__dict__[attr] = value
else:
super().__setattr__(attr, value)
def _pre_evaluate(self, *args, **kwargs):
"""
Model specific input setup that needs to occur prior to model evaluation
"""
# Broadcast inputs into common size
inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs)
# Setup actual model evaluation method
parameters = self._param_sets(raw=True, units=True)
def evaluate(_inputs):
return self.evaluate(*chain(_inputs, parameters))
return evaluate, inputs, broadcasted_shapes, kwargs
def get_bounding_box(self, with_bbox=True):
"""
Return the ``bounding_box`` of a model if it exists or ``None``
otherwise.
Parameters
----------
with_bbox :
The value of the ``with_bounding_box`` keyword argument
when calling the model. Default is `True` for usage when
looking up the model's ``bounding_box`` without risk of error.
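        For example (a minimal sketch):
        >>> from astropy.modeling.models import Gaussian1D
        >>> Gaussian1D().get_bounding_box() is not None
        True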
"""
bbox = None
if not isinstance(with_bbox, bool) or with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
pass
if isinstance(bbox, CompoundBoundingBox) and not isinstance(with_bbox, bool):
bbox = bbox[with_bbox]
return bbox
@property
def _argnames(self):
"""The inputs used to determine input_shape for bounding_box evaluation"""
return self.inputs
def _validate_input_shape(self, _input, idx, argnames, model_set_axis, check_model_set_axis):
"""
Perform basic validation of a single model input's shape
-- it has the minimum dimensions for the given model_set_axis
Returns the shape of the input if validation succeeds.
"""
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
f"For model_set_axis={model_set_axis}, all inputs must be at "
f"least {model_set_axis + 1}-dimensional.")
if input_shape[model_set_axis] != self._n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
f"Input argument '{argname}' does not have the correct "
f"dimensions in model_set_axis={model_set_axis} for a model set with "
f"n_models={self._n_models}.")
return input_shape
def _validate_input_shapes(self, inputs, argnames, model_set_axis):
"""
Perform basic validation of model inputs
--that they are mutually broadcastable and that they have
the minimum dimensions for the given model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = self._n_models > 1 and model_set_axis is not False
all_shapes = []
for idx, _input in enumerate(inputs):
all_shapes.append(self._validate_input_shape(_input, idx, argnames,
model_set_axis, check_model_set_axis))
input_shape = check_broadcast(*all_shapes)
if input_shape is None:
raise ValueError(
"All inputs must have identical shapes or must be scalars.")
return input_shape
def input_shape(self, inputs):
"""Get input shape for bounding_box evaluation"""
return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis)
def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox):
"""
Generic model evaluation routine
Selects and evaluates model with or without bounding_box enforcement
"""
# Evaluate the model using the prepared evaluation method either
# enforcing the bounding_box or not.
bbox = self.get_bounding_box(with_bbox)
if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None:
outputs = bbox.evaluate(evaluate, _inputs, fill_value)
else:
outputs = evaluate(_inputs)
return outputs
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
Model specific post evaluation processing of outputs
"""
if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
return outputs
@property
def bbox_with_units(self):
return (not isinstance(self, CompoundModel))
def __call__(self, *args, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
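        For example, a 1-D Gaussian evaluated at its mean returns its
        amplitude (a minimal sketch):
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D(amplitude=2, mean=0, stddev=1)
        >>> g(0.0)  # doctest: +SKIP
        2.0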
"""
# Turn any keyword arguments into positional arguments.
args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs)
# Read model evaluation related parameters
with_bbox = kwargs.pop('with_bounding_box', False)
fill_value = kwargs.pop('fill_value', np.nan)
# prepare for model evaluation (overridden in CompoundModel)
evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate(*args, **kwargs)
outputs = self._generic_evaluate(evaluate, inputs,
fill_value, with_bbox)
# post-process evaluation results (overridden in CompoundModel)
return self._post_evaluate(inputs, outputs, broadcasted_shapes, with_bbox, **kwargs)
def _get_renamed_inputs_as_positional(self, *args, **kwargs):
def _keyword2positional(kwargs):
# Inputs were passed as keyword (not positional) arguments.
# Because the signature of the ``__call__`` is defined at
# the class level, the name of the inputs cannot be changed at
# the instance level and the old names are always present in the
# signature of the method. In order to use the new names of the
# inputs, the old names are taken out of ``kwargs``, the input
# values are sorted in the order of self.inputs and passed as
# positional arguments to ``__call__``.
# These are the keys that are always present as keyword arguments.
keys = ['model_set_axis', 'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map']
new_inputs = {}
# kwargs contain the names of the new inputs + ``keys``
allkeys = list(kwargs.keys())
# Remove the names of the new inputs from kwargs and save them
# to a dict ``new_inputs``.
for key in allkeys:
if key not in keys:
new_inputs[key] = kwargs[key]
del kwargs[key]
return new_inputs, kwargs
n_args = len(args)
new_inputs, kwargs = _keyword2positional(kwargs)
n_all_args = n_args + len(new_inputs)
if n_all_args < self.n_inputs:
raise ValueError(f"Missing input arguments - expected {self.n_inputs},"
f" got {n_all_args}")
elif n_all_args > self.n_inputs:
raise ValueError(f"Too many input arguments - expected {self.n_inputs},"
f" got {n_all_args}")
if n_args == 0:
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
new_args.append(new_inputs[k])
elif n_args != self.n_inputs:
# Some inputs are passed as positional, others as keyword arguments.
args = list(args)
# Create positional arguments from the keyword arguments in ``new_inputs``.
new_args = []
for k in self.inputs:
if k in new_inputs:
new_args.append(new_inputs[k])
else:
new_args.append(args[0])
del args[0]
else:
new_args = args
return new_args, kwargs
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def model_set_axis(self):
"""
        The index of the model set axis--that is, the axis of a parameter
        array that indicates which model in the set a given parameter value
        pertains to--as specified when the model was initialized.
See the documentation on :ref:`astropy:modeling-model-sets`
for more details.
"""
return self._model_set_axis
@property
def param_sets(self):
"""
        Return parameters as a set of parameter arrays.
        This is a list with one item per parameter, where each item is an
        array of that parameter's values across all parameter sets, with the
        last axis associated with the parameter set.
"""
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
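        Examples
        --------
        For instance (illustrative values):
        >>> from astropy.modeling.models import Gaussian1D
        >>> Gaussian1D(amplitude=2, mean=1, stddev=3).parameters
        array([2., 1., 3.])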
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
self._parameters_to_array()
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
f"parameters array: {e!r}")
self._array_to_parameters()
@property
def sync_constraints(self):
'''
This is a boolean property that indicates whether or not accessing constraints
        automatically checks the constituent models' current values. It defaults to True
on creation of a model, but for fitting purposes it should be set to False
for performance reasons.
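        A short sketch of the intended fitting-time usage:
        >>> from astropy.modeling.models import Gaussian1D
        >>> g = Gaussian1D()
        >>> g.sync_constraints
        True
        >>> g.sync_constraints = False   # typically done by a fitter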
'''
if not hasattr(self, '_sync_constraints'):
self._sync_constraints = True
return self._sync_constraints
@sync_constraints.setter
def sync_constraints(self, value):
if not isinstance(value, bool):
raise ValueError('sync_constraints only accepts True or False as values')
self._sync_constraints = value
@property
def fixed(self):
"""
A ``dict`` mapping parameter names to their fixed constraint.
"""
if not hasattr(self, '_fixed') or self.sync_constraints:
self._fixed = _ConstraintsDict(self, 'fixed')
return self._fixed
@property
def bounds(self):
"""
A ``dict`` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples or ``[min, max]`` lists.
"""
if not hasattr(self, '_bounds') or self.sync_constraints:
self._bounds = _ConstraintsDict(self, 'bounds')
return self._bounds
@property
def tied(self):
"""
A ``dict`` mapping parameter names to their tied constraint.
"""
if not hasattr(self, '_tied') or self.sync_constraints:
self._tied = _ConstraintsDict(self, 'tied')
return self._tied
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._mconstraints['eqcons']
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._mconstraints['ineqcons']
def has_inverse(self):
"""
Returns True if the model has an analytic or user
inverse defined.
"""
try:
self.inverse
except NotImplementedError:
return False
return True
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
        set with a manually-defined inverse, such as a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
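        Examples
        --------
        An illustrative sketch of overriding and restoring the inverse:
        >>> from astropy.modeling.models import Rotation2D
        >>> rot = Rotation2D(30)
        >>> rot.has_inverse()
        True
        >>> rot.inverse = Rotation2D(-30)   # manually assign an inverse
        >>> del rot.inverse                 # restore the analytic default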
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
result = self._inverse()
if result is not NotImplemented:
if not self._has_inverse_bounding_box:
result.bounding_box = None
return result
raise NotImplementedError("No analytical or user-supplied inverse transform "
"has been implemented for this model.")
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
"model to have no inverse.")
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
try:
del self._user_inverse
except AttributeError:
pass
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
@property
def bounding_box(self):
r"""
        A `tuple` of length `n_inputs` defining the bounding box limits;
        raises `NotImplementedError` if no bounding_box is defined.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
manually to an array-like object of shape ``(model.n_inputs, 2)``. For
further usage, see :ref:`astropy:bounding-boxes`
The limits are ordered according to the `numpy` ``'C'`` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
NotImplementedError: No bounding box is defined for this model
(note: the bounding box was explicitly disabled for this model;
use `del model.bounding_box` to restore the default bounding box,
if one is defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model).")
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError(
"No bounding box is defined for this model.")
elif isinstance(self._bounding_box, ModelBoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return ModelBoundingBox.validate(self, self._bounding_box())
else:
# The only other allowed possibility is that it's a ModelBoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), model=self)()
return self._bounding_box(bounding_box, model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
elif (isinstance(bounding_box, CompoundBoundingBox) or
isinstance(bounding_box, dict)):
cls = CompoundBoundingBox
elif (isinstance(self._bounding_box, type) and
issubclass(self._bounding_box, ModelBoundingBox)):
cls = self._bounding_box
else:
cls = ModelBoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
def set_slice_args(self, *args):
if isinstance(self._user_bounding_box, CompoundBoundingBox):
self._user_bounding_box.slice_args = args
else:
raise RuntimeError('The bounding_box for this model is not compound')
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def cov_matrix(self):
"""
Fitter should set covariance matrix, if available.
"""
return self._cov_matrix
@cov_matrix.setter
def cov_matrix(self, cov):
self._cov_matrix = cov
unfix_untied_params = [p for p in self.param_names if (self.fixed[p] is False)
and (self.tied[p] is False)]
if type(cov) == list: # model set
param_stds = []
for c in cov:
param_stds.append([np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)])
for p, param_name in enumerate(unfix_untied_params):
par = getattr(self, param_name)
par.std = [item[p] for item in param_stds]
setattr(self, param_name, par)
else:
param_stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix)]
for param_name in unfix_untied_params:
par = getattr(self, param_name)
par.std = param_stds.pop(0)
setattr(self, param_name, par)
@property
def stds(self):
"""
Standard deviation of parameters, if covariance matrix is available.
"""
return self._stds
@stds.setter
def stds(self, stds):
self._stds = stds
@property
def separable(self):
""" A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
raise NotImplementedError(
'The "separable" property is not defined for '
f'model {self.__class__.__name__}')
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
Return an instance of the model for which the parameter values have
been converted to the right units for the data, then the units have
been stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not
necessarily the units of the input data, but are derived from them.
Model subclasses that want fitting to work in the presence of
quantities need to define a ``_parameter_units_for_data_units`` method
that takes the input and output units (as two dictionaries) and
returns a dictionary giving the target units for each parameter.
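        A minimal illustrative sketch (the data values here are arbitrary):
        >>> from astropy.modeling.models import Gaussian1D
        >>> from astropy import units as u
        >>> g = Gaussian1D(amplitude=3 * u.Jy, mean=1.1 * u.um, stddev=0.1 * u.um)
        >>> g_unitless = g.without_units_for_data(x=[1, 2] * u.um, y=[3, 4] * u.Jy)
        >>> g_unitless.amplitude.unit is None
        True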
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit,
outputs_unit)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
if isinstance(model, CompoundModel):
model.strip_units_from_tree()
return model
def output_units(self, **kwargs):
"""
Return a dictionary of output units for this model given a dictionary
of fitting inputs and outputs
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
        This method will force extra model evaluations, which may be computationally
expensive. To avoid this, one can add a return_units property to the model,
see :ref:`astropy:models_return_units`.
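        A minimal illustrative sketch (for a model whose parameters carry units):
        >>> from astropy.modeling.models import Gaussian1D
        >>> from astropy import units as u
        >>> g = Gaussian1D(amplitude=3 * u.Jy, mean=1.1 * u.um, stddev=0.1 * u.um)
        >>> g.output_units(x=1 * u.um)  # doctest: +SKIP
        {'y': Unit("Jy")}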
"""
units = self.return_units
if units is None or units == {}:
inputs = {inp: kwargs[inp] for inp in self.inputs}
values = self(**inputs)
if self.n_outputs == 1:
values = (values,)
units = {out: getattr(values[index], 'unit', dimensionless_unscaled)
for index, out in enumerate(self.outputs)}
return units
def strip_units_from_tree(self):
for item in self._leaflist:
for parname in item.param_names:
par = getattr(item, parname)
par._set_unit(None, force=True)
def with_units_from_data(self, **kwargs):
"""
Return an instance of the model which has units for which the parameter
values are compatible with the data units specified.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units
of the input data, but are derived from them. Model subclasses that
want fitting to work in the presence of quantities need to define a
``_parameter_units_for_data_units`` method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
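        A minimal illustrative sketch (attaching units derived from the data to
        an initially unitless model):
        >>> from astropy.modeling.models import Gaussian1D
        >>> from astropy import units as u
        >>> g = Gaussian1D(amplitude=3, mean=1.1, stddev=0.1)
        >>> g_q = g.with_units_from_data(x=[1, 2] * u.um, y=[3, 4] * u.Jy)
        >>> g_q.mean.unit
        Unit("um")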
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit,
outputs_unit)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly,
# hence the call to ``_set_unit``.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
@property
def _has_units(self):
# Returns True if any of the parameters have units
for param in self.param_names:
if getattr(self, param).unit is not None:
return True
        return False
@property
def _supports_unit_fitting(self):
# If the model has a ``_parameter_units_for_data_units`` method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, '_parameter_units_for_data_units')
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
        (i.e., a = y - bx).
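        A minimal illustrative sketch (`~astropy.modeling.functional_models.Shift`
        is one model that provides such an implicit term):
        >>> import numpy as np
        >>> from astropy.modeling.models import Shift
        >>> Shift(offset=2).sum_of_implicit_terms(np.arange(3.))  # doctest: +SKIP
        array([0., 1., 2.])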
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
            `Model.bounding_box` is `None`, ``out`` or ``coords`` must be
passed.
Raises
------
ValueError
            If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
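        A minimal illustrative sketch (array sizes chosen arbitrarily; only the
        11x11 region covered by the bounding box is evaluated):
        >>> import numpy as np
        >>> from astropy.modeling.models import Gaussian2D
        >>> g = Gaussian2D(amplitude=1, x_mean=25, y_mean=25,
        ...                x_stddev=2, y_stddev=2)
        >>> g.bounding_box = ((20, 30), (20, 30))  # ((y_low, y_high), (x_low, x_high))
        >>> image = np.zeros((50, 50))
        >>> image = g.render(out=image)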
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError('If no bounding_box is set, '
'coords or out must be input.')
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError('inconsistent shape of the output.')
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError('the array and model must have the same '
'number of dimensions.')
if bbox is not None:
# Assures position is at center pixel,
# important when using add_array.
pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos)
for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
'The `bounding_box` is larger than the input out in '
'one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
not be overridden since it will return the input units based on the
annotations.
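        A minimal illustrative sketch of the annotation mechanism (the model
        ``SpectralShift`` here is made up for the example):
        >>> from astropy import units as u
        >>> from astropy.modeling import Fittable1DModel, Parameter
        >>> class SpectralShift(Fittable1DModel):
        ...     offset = Parameter(default=0)
        ...     @staticmethod
        ...     def evaluate(x: u.um, offset):
        ...         return x + offset
        >>> SpectralShift().input_units
        {'x': Unit("um")}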
"""
if hasattr(self, '_input_units'):
return self._input_units
elif hasattr(self.evaluate, '__annotations__'):
annotations = self.evaluate.__annotations__.copy()
annotations.pop('return', None)
if annotations:
                # If any input is missing an annotation this will raise a KeyError.
return {name: annotations[name] for name in self.inputs}
else:
# None means any unit is accepted
return None
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the
output of evaluate should be in, and returns a dictionary mapping
outputs to units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, '_return_units'):
return self._return_units
elif hasattr(self.evaluate, '__annotations__'):
return self.evaluate.__annotations__.get('return', None)
else:
# None means any unit is accepted
return None
def _prepare_inputs_single_model(self, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
            # Ensure that array scalars are always upgraded to 1-D arrays for the
# sake of consistency with how parameters work. They will be cast back
# to scalars at the end
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if self.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
f"self input argument {self.inputs[idx]!r} of shape {input_shape!r} "
f"cannot be broadcast with parameter {param.name!r} of shape "
f"{param.shape!r}.")
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if self.n_outputs > self.n_inputs:
extra_outputs = self.n_outputs - self.n_inputs
if not broadcasts:
# If there were no inputs then the broadcasts list is empty
# just add a None since there is no broadcasting of outputs and
                # inputs necessary (see _prepare_outputs_single_model)
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
@staticmethod
def _remove_axes_from_shape(shape, axis):
"""
Given a shape tuple as the first input, construct a new one by removing
        that particular axis from the shape, together with all axes preceding
        it. If the axis number is negative it is interpreted relative to the
        last axis, and only that single axis is removed.
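        For example (the second call removes only the last axis because the
        axis number is negative):
        >>> from astropy.modeling import Model
        >>> Model._remove_axes_from_shape((2, 3, 4), 1)
        (4,)
        >>> Model._remove_axes_from_shape((2, 3, 4), -1)
        (2, 3)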
"""
if len(shape) == 0:
return shape
if axis < 0:
axis = len(shape) + axis
return shape[:axis] + shape[axis+1:]
if axis >= len(shape):
axis = len(shape)-1
shape = shape[axis+1:]
return shape
def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input,
**kwargs):
reshaped = []
pivots = []
model_set_axis_param = self.model_set_axis # needed to reshape param
for idx, _input in enumerate(inputs):
max_param_shape = ()
if self._n_models > 1 and model_set_axis_input is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (_input.shape[:model_set_axis_input] +
_input.shape[model_set_axis_input + 1:])
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(input_shape,
self._remove_axes_from_shape(param.shape,
model_set_axis_param))
except IncompatibleShapeError:
raise ValueError(
f"Model input argument {self.inputs[idx]!r} of shape {input_shape!r} "
f"cannot be broadcast with parameter {param.name!r} of shape "
f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}.")
if len(param.shape) - 1 > len(max_param_shape):
max_param_shape = self._remove_axes_from_shape(param.shape,
model_set_axis_param)
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis_input is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model_set_axis_param
else:
pivot = input_ndim - len(max_param_shape)
new_shape = (_input.shape[:pivot] + (1,) +
_input.shape[pivot:])
new_input = _input.reshape(new_shape)
else:
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = self.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (_input.shape[:pivot + 1] + new_axes +
_input.shape[pivot + 1:])
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis_input,
pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if self.n_inputs < self.n_outputs:
pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs))
return reshaped, (pivots,)
def prepare_inputs(self, *inputs, model_set_axis=None, equivalencies=None,
**kwargs):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
there are more than one parameter sets. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
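        A minimal illustrative sketch (a single model with one 2-D input):
        >>> import numpy as np
        >>> from astropy.modeling.models import Gaussian1D
        >>> inputs, broadcasted_shapes = Gaussian1D().prepare_inputs(np.zeros((2, 3)))
        >>> inputs[0].shape
        (2, 3)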
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
self._validate_input_shapes(inputs, self.inputs, model_set_axis)
inputs_map = kwargs.get('inputs_map', None)
inputs = self._validate_input_units(inputs, equivalencies, inputs_map)
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if self._n_models == 1:
return self._prepare_inputs_single_model(params, inputs, **kwargs)
else:
return self._prepare_inputs_model_set(params, inputs,
model_set_axis, **kwargs)
def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None):
inputs = list(inputs)
name = self.name or self.__class__.__name__
# Check that the units are correct, if applicable
if self.input_units is not None:
            # If an inputs_map is provided that means this is in the context of
# a compound model and it is necessary to create the appropriate
# alias for the input coordinate name for the equivalencies dict
if inputs_map:
edict = {}
for mod, mapping in inputs_map:
if self is mod:
edict[mapping[0]] = equivalencies[mapping[1]]
else:
edict = equivalencies
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(self.inputs,
edict,
self.input_units_equivalencies)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(
input_unit,
equivalencies=input_units_equivalencies[input_name]):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is
# because some equivalencies are non-linear, and
# we need to be sure that we evaluate the model in
# its own frame of reference. If input_units_strict
# is set, we also need to convert to the input units.
if (len(input_units_equivalencies) > 0 or
self.input_units_strict[input_name]):
inputs[i] = inputs[i].to(input_unit,
equivalencies=input_units_equivalencies[
input_name])
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
raise UnitsError(f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
"could not be converted to "
"required dimensionless "
"input")
else:
raise UnitsError(f"{name}: Units of input '{self.inputs[i]}', "
f"{inputs[i].unit} ({inputs[i].unit.physical_type}),"
" could not be "
"converted to required input"
f" units of {input_unit} ({input_unit.physical_type})")
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (not self.input_units_allow_dimensionless[input_name]
and input_unit is not dimensionless_unscaled
and input_unit is not None):
if np.any(inputs[i] != 0):
raise UnitsError(f"{name}: Units of input '{self.inputs[i]}',"
" (dimensionless), could not be converted to required "
f"input units of {input_unit} "
f"({input_unit.physical_type})")
return inputs
def _process_output_units(self, inputs, outputs):
        inputs_are_quantity = any(isinstance(i, Quantity) for i in inputs)
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple(Quantity(out, return_units.get(out_name, None), subok=True)
for out, out_name in zip(outputs, self.outputs))
return outputs
@staticmethod
def _prepare_output_single_model(output, broadcast_shape):
if broadcast_shape is not None:
if not broadcast_shape:
return output.item()
else:
try:
return output.reshape(broadcast_shape)
except ValueError:
try:
return output.item()
except ValueError:
return output
return output
def _prepare_outputs_single_model(self, outputs, broadcasted_shapes):
outputs = list(outputs)
for idx, output in enumerate(outputs):
try:
broadcast_shape = check_broadcast(*broadcasted_shapes[0])
except (IndexError, TypeError):
broadcast_shape = broadcasted_shapes[0][idx]
outputs[idx] = self._prepare_output_single_model(output, broadcast_shape)
return tuple(outputs)
def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis):
pivots = broadcasted_shapes[0]
        # If model_set_axis was not passed (None) or was passed as False, fall
        # back to the model's own model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = self.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot,
model_set_axis)
return tuple(outputs)
def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs):
model_set_axis = kwargs.get('model_set_axis', None)
if len(self) == 1:
return self._prepare_outputs_single_model(outputs, broadcasted_shapes)
else:
return self._prepare_outputs_model_set(outputs, broadcasted_shapes, model_set_axis)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return self.copy()
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
"""
new_model = self.copy()
new_model._name = name
return new_model
def coerce_units(
self,
input_units=None,
return_units=None,
input_units_equivalencies=None,
input_units_allow_dimensionless=False
):
"""
Attach units to this (unitless) model.
Parameters
----------
input_units : dict or tuple, optional
Input units to attach. If dict, each key is the name of a model input,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.inputs`.
return_units : dict or tuple, optional
Output units to attach. If dict, each key is the name of a model output,
and the value is the unit to attach. If tuple, the elements are units
to attach in order corresponding to `Model.outputs`.
input_units_equivalencies : dict, optional
Default equivalencies to apply to input values. If set, this should be a
dictionary where each key is a string that corresponds to one of the
model inputs.
input_units_allow_dimensionless : bool or dict, optional
Allow dimensionless input. If this is True, input values to evaluate will
gain the units specified in input_units. If this is a dictionary then it
should map input name to a bool to allow dimensionless numbers for that
input.
Returns
-------
`CompoundModel`
A `CompoundModel` composed of the current model plus
`~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units.
Raises
------
ValueError
If the current model already has units.
Examples
--------
Wrapping a unitless model to require and convert units:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,))
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP
<Quantity 1.2 s>
Wrapping a unitless model but still permitting unitless input:
>>> from astropy.modeling.models import Polynomial1D
>>> from astropy import units as u
>>> poly = Polynomial1D(1, c0=1, c1=2)
>>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True)
>>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP
<Quantity 21. s>
>>> model(10) # doctest: +FLOAT_CMP
<Quantity 21. s>
"""
from .mappings import UnitsMapping
result = self
if input_units is not None:
if self.input_units is not None:
model_units = self.input_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.inputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError("Cannot specify input_units for model with "
"existing input units")
if isinstance(input_units, dict):
if input_units.keys() != set(self.inputs):
message = (
f"""input_units keys ({", ".join(input_units.keys())}) """
f"""do not match model inputs ({", ".join(self.inputs)})"""
)
raise ValueError(message)
input_units = [input_units[i] for i in self.inputs]
if len(input_units) != self.n_inputs:
message = (
"input_units length does not match n_inputs: "
f"expected {self.n_inputs}, received {len(input_units)}"
)
raise ValueError(message)
mapping = tuple((unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units))
input_mapping = UnitsMapping(
mapping,
input_units_equivalencies=input_units_equivalencies,
input_units_allow_dimensionless=input_units_allow_dimensionless
)
input_mapping.inputs = self.inputs
input_mapping.outputs = self.inputs
result = input_mapping | result
if return_units is not None:
if self.return_units is not None:
model_units = self.return_units
else:
model_units = {}
for unit in [model_units.get(i) for i in self.outputs]:
if unit is not None and unit != dimensionless_unscaled:
raise ValueError("Cannot specify return_units for model "
"with existing output units")
if isinstance(return_units, dict):
if return_units.keys() != set(self.outputs):
message = (
f"""return_units keys ({", ".join(return_units.keys())}) """
f"""do not match model outputs ({", ".join(self.outputs)})"""
)
raise ValueError(message)
return_units = [return_units[i] for i in self.outputs]
if len(return_units) != self.n_outputs:
message = (
"return_units length does not match n_outputs: "
f"expected {self.n_outputs}, received {len(return_units)}"
)
raise ValueError(message)
mapping = tuple((model_units.get(i), unit)
for i, unit in zip(self.outputs, return_units))
return_mapping = UnitsMapping(mapping)
return_mapping.inputs = self.outputs
return_mapping.outputs = self.outputs
result = result | return_mapping
return result
@property
def n_submodels(self):
"""
Return the number of components in a single model, which is
obviously 1.
"""
return 1
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
for ckey, cvalue in values.items():
param = getattr(self, ckey)
setattr(param, constraint, cvalue)
self._mconstraints = {}
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._mconstraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
n_models = kwargs.pop('n_models', None)
if not (n_models is None or
(isinstance(n_models, (int, np.integer)) and n_models >= 1)):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
f"(got {n_models!r})")
model_set_axis = kwargs.pop('model_set_axis', None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (model_set_axis is False or
np.issubdtype(type(model_set_axis), np.integer)):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
f"model in a set of models (got {model_set_axis!r}).")
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = set()
if len(args) > len(self.param_names):
raise TypeError(
f"{self.__class__.__name__}.__init__() takes at most "
f"{len(self.param_names)} positional arguments ({len(args)} given)")
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
                # A value of None implies using the default value, if one exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
param_name = self.param_names[idx]
params.add(param_name)
if not isinstance(arg, Parameter):
value = quantity_asanyarray(arg, dtype=float)
else:
value = arg
self._initialize_parameter_value(param_name, value)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
f"{self.__class__.__name__}.__init__() got multiple values for parameter "
f"{param_name!r}")
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray
# because if any of the arguments are quantities, we need
# to return a Quantity object not a plain Numpy array.
value = quantity_asanyarray(value, dtype=float)
params.add(param_name)
self._initialize_parameter_value(param_name, value)
# Now deal with case where param_name is not supplied by args or kwargs
for param_name in self.param_names:
if param_name not in params:
self._initialize_parameter_value(param_name, None)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
f"{self.__class__.__name__}.__init__() got an unrecognized parameter "
f"{kwarg!r}")
# Determine the number of model sets: If the model_set_axis is
# None then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name in self.param_names:
value = getattr(self, name)
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension "
f"at least {min_ndim} for model_set_axis={model_set_axis} (the value "
f"given for {name!r} is only {param_ndim}-dimensional)")
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
f"Inconsistent dimensions for parameter {name!r} for "
f"{n_models} model sets. The length of axis {model_set_axis} must be the "
"same for all input parameter values")
self._check_param_broadcast(max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(None)
self._n_models = n_models
# now validate parameters
for name in params:
param = getattr(self, name)
if param._validator is not None:
param._validator(self, param.value)
def _initialize_parameter_value(self, param_name, value):
"""Mostly deals with consistency checks and determining unit issues."""
if isinstance(value, Parameter):
self.__dict__[param_name] = value
return
param = getattr(self, param_name)
# Use default if value is not provided
if value is None:
default = param.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(f"{self.__class__.__name__}.__init__() requires a value for "
f"parameter {param_name!r}")
value = default
unit = param.unit
else:
if isinstance(value, Quantity):
unit = value.unit
value = value.value
else:
unit = None
if unit is None and param.unit is not None:
raise InputParameterError(
f"{self.__class__.__name__}.__init__() requires a Quantity for parameter "
f"{param_name!r}")
param._unit = unit
param.internal_unit = None
if param._setter is not None:
if unit is not None:
_val = param._setter(value * unit)
else:
_val = param._setter(value)
if isinstance(_val, Quantity):
param.internal_unit = _val.unit
param._internal_value = np.array(_val.value)
else:
param.internal_unit = None
param._internal_value = np.array(_val)
else:
param._value = np.array(value)
def _initialize_slices(self):
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]['slice'] = param_slice
param_metrics[name]['shape'] = param_shape
param_metrics[name]['size'] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
def _parameters_to_array(self):
# Now set the parameter values (this will also fill
# self._parameters)
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = param.value
if not isinstance(value, np.ndarray):
value = np.array([value])
self._parameters[param_metrics[name]['slice']] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
def _array_to_parameters(self):
param_metrics = self._param_metrics
for name in self.param_names:
param = getattr(self, name)
value = self._parameters[param_metrics[name]['slice']]
value.shape = param_metrics[name]['shape']
param.value = value
def _check_param_broadcast(self, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
        If model_set_axis is None this merely checks that the parameter arrays
        broadcast against each other. This mode is only used for single model
        sets.
"""
all_shapes = []
model_set_axis = self._model_set_axis
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
# array with shape (2,) is extended to (2, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (param_shape[:model_set_axis + 1] +
new_axes +
param_shape[model_set_axis + 1:])
self._param_metrics[name]['broadcast_shape'] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = self.param_names[shape_a_idx]
param_b = self.param_names[shape_b_idx]
raise InputParameterError(
f"Parameter {param_a!r} of shape {shape_a!r} cannot be broadcast with "
f"parameter {param_b!r} of shape {shape_b!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules.")
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
        whether or not to return the raw parameter values (i.e. the values that
        are actually stored in the ._parameters array, as opposed to the values
        displayed to users). In most cases these are one and the same, but there
        are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw and param._setter:
value = param._internal_value
else:
value = param.value
broadcast_shape = self._param_metrics[name].get('broadcast_shape')
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and param.internal_unit is not None:
unit = param.internal_unit
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
parts = [repr(a) for a in args]
parts.extend(
f"{name}={param_repr_oneline(getattr(self, name))}"
for name in self.param_names)
if self.name is not None:
parts.append(f'name={self.name!r}')
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] == value:
continue
parts.append(f'{kwarg}={value!r}')
if len(self) > 1:
parts.append(f"n_models={len(self)}")
return f"<{self.__class__.__name__}({', '.join(parts)})>"
def _format_str(self, keywords=[], defaults={}):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('Inputs', self.inputs),
('Outputs', self.outputs),
('Model set size', len(self))
]
parts = [f'{keyword}: {value}'
for keyword, value in default_keywords
if value is not None]
for keyword, value in keywords:
if keyword.lower() in defaults and defaults[keyword.lower()] == value:
continue
parts.append(f'{keyword}: {value}')
parts.append('Parameters:')
if len(self) == 1:
columns = [[getattr(self, name).value]
for name in self.param_names]
else:
columns = [getattr(self, name).value
for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return '\n'.join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
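    For example, for a straight line ``y = slope * x + intercept`` a suitable
    ``fit_deriv`` could look like this (an illustrative sketch; it returns one
    derivative array per parameter, matching ``col_fit_deriv = True``):
    >>> import numpy as np
    >>> def line_fit_deriv(x, slope, intercept):
    ...     d_slope = x
    ...     d_intercept = np.ones_like(x)
    ...     return [d_slope, d_intercept]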
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
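    For example, a minimal user-defined model might look like this (an
    illustrative sketch; ``Line`` and its parameters are made up for the
    example):
    >>> from astropy.modeling import Fittable1DModel, Parameter
    >>> class Line(Fittable1DModel):
    ...     slope = Parameter(default=1)
    ...     intercept = Parameter(default=0)
    ...     @staticmethod
    ...     def evaluate(x, slope, intercept):
    ...         return slope * x + intercept
    >>> Line()(2)
    2.0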
"""
n_inputs = 1
n_outputs = 1
_separable = True
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
n_inputs = 2
n_outputs = 1
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params),
f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: (f[0](inputs[:f[1]], params) +
g[0](inputs[f[1]:], params)),
f[1] + g[1], f[2] + g[2])
BINARY_OPERATORS = {
'+': _make_arithmetic_operator(operator.add),
'-': _make_arithmetic_operator(operator.sub),
'*': _make_arithmetic_operator(operator.mul),
'/': _make_arithmetic_operator(operator.truediv),
'**': _make_arithmetic_operator(operator.pow),
'|': _composition_operator,
'&': _join_operator
}
SPECIAL_OPERATORS = _SpecialOperatorsDict()
def _add_special_operator(sop_name, sop):
return SPECIAL_OPERATORS.add(sop_name, sop)
class CompoundModel(Model):
'''
Base class for compound models.
While it can be used directly, the recommended way
to combine models is through the model operators.
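    For example (an illustrative sketch):
    >>> from astropy.modeling.models import Const1D, Gaussian1D
    >>> m = Const1D(amplitude=1) + Gaussian1D(amplitude=2, mean=0, stddev=1)
    >>> isinstance(m, CompoundModel)
    True
    >>> m.param_names
    ('amplitude_0', 'amplitude_1', 'mean_1', 'stddev_1')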
'''
def __init__(self, op, left, right, name=None):
self.__dict__['_param_names'] = None
self._n_submodels = None
self.op = op
self.left = left
self.right = right
self._bounding_box = None
self._user_bounding_box = None
self._leaflist = None
self._tdict = None
self._parameters = None
self._parameters_ = None
self._param_metrics = None
if op != 'fix_inputs' and len(left) != len(right):
raise ValueError(
'Both operands must have equal values for n_models')
self._n_models = len(left)
if op != 'fix_inputs' and ((left.model_set_axis != right.model_set_axis)
or left.model_set_axis): # not False and not 0
raise ValueError("model_set_axis must be False or 0 and consistent for operands")
self._model_set_axis = left.model_set_axis
if op in ['+', '-', '*', '/', '**'] or op in SPECIAL_OPERATORS:
if (left.n_inputs != right.n_inputs or
left.n_outputs != right.n_outputs):
raise ModelDefinitionError(
'Both operands must match numbers of inputs and outputs')
self.n_inputs = left.n_inputs
self.n_outputs = left.n_outputs
self.inputs = left.inputs
self.outputs = left.outputs
elif op == '&':
self.n_inputs = left.n_inputs + right.n_inputs
self.n_outputs = left.n_outputs + right.n_outputs
self.inputs = combine_labels(left.inputs, right.inputs)
self.outputs = combine_labels(left.outputs, right.outputs)
elif op == '|':
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
f"Unsupported operands for |: {left.name} (n_inputs={left.n_inputs}, "
f"n_outputs={left.n_outputs}) and {right.name} "
f"(n_inputs={right.n_inputs}, n_outputs={right.n_outputs}); "
"n_outputs for the left-hand model must match n_inputs "
"for the right-hand model.")
self.n_inputs = left.n_inputs
self.n_outputs = right.n_outputs
self.inputs = left.inputs
self.outputs = right.outputs
elif op == 'fix_inputs':
if not isinstance(left, Model):
raise ValueError('First argument to "fix_inputs" must be an instance of '
'an astropy Model.')
if not isinstance(right, dict):
raise ValueError('Expected a dictionary for second argument of "fix_inputs".')
# Dict keys must match either possible indices
# for model on left side, or names for inputs.
self.n_inputs = left.n_inputs - len(right)
# Assign directly to the private attribute (instead of using the setter)
# to avoid asserting the new number of outputs matches the old one.
self._outputs = left.outputs
self.n_outputs = left.n_outputs
newinputs = list(left.inputs)
keys = right.keys()
input_ind = []
for key in keys:
if np.issubdtype(type(key), np.integer):
if key >= left.n_inputs or key < 0:
raise ValueError(
'Substitution key integer value '
'not among possible input choices.')
if key in input_ind:
raise ValueError("Duplicate specification of "
"same input (index/name).")
input_ind.append(key)
elif isinstance(key, str):
if key not in left.inputs:
raise ValueError(
'Substitution key string not among possible '
'input choices.')
# Check to see it doesn't match positional
# specification.
ind = left.inputs.index(key)
if ind in input_ind:
raise ValueError("Duplicate specification of "
"same input (index/name).")
input_ind.append(ind)
# Remove substituted inputs
input_ind.sort()
input_ind.reverse()
for ind in input_ind:
del newinputs[ind]
self.inputs = tuple(newinputs)
# Now check to see if the input model has bounding_box defined.
# If so, remove the appropriate dimensions and set it for this
# instance.
try:
self.bounding_box = self.left.bounding_box.fix_inputs(self, right)
except NotImplementedError:
pass
else:
raise ModelDefinitionError('Illegal operator: ', self.op)
self.name = name
self._fittable = None
self.fit_deriv = None
self.col_fit_deriv = None
if op in ('|', '+', '-'):
self.linear = left.linear and right.linear
else:
self.linear = False
self.eqcons = []
self.ineqcons = []
self.n_left_params = len(self.left.parameters)
self._map_parameters()
def _get_left_inputs_from_args(self, args):
return args[:self.left.n_inputs]
def _get_right_inputs_from_args(self, args):
op = self.op
if op == '&':
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs: self.left.n_inputs + self.right.n_inputs]
elif op == '|' or op == 'fix_inputs':
return None
else:
return args[:self.left.n_inputs]
def _get_left_params_from_args(self, args):
op = self.op
if op == '&':
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
n_inputs = self.left.n_inputs + self.right.n_inputs
return args[n_inputs: n_inputs + self.n_left_params]
else:
return args[self.left.n_inputs: self.left.n_inputs + self.n_left_params]
def _get_right_params_from_args(self, args):
op = self.op
if op == 'fix_inputs':
return None
if op == '&':
# Args expected to look like (*left inputs, *right inputs, *left params, *right params)
return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params:]
else:
return args[self.left.n_inputs + self.n_left_params:]
def _get_kwarg_model_parameters_as_positional(self, args, kwargs):
        # We could do it with inserts, but rebuilding seems like the simplest way.
# TODO: Check if any param names are in kwargs maybe as an intersection of sets?
if self.op == "&":
new_args = list(args[:self.left.n_inputs + self.right.n_inputs])
args_pos = self.left.n_inputs + self.right.n_inputs
else:
new_args = list(args[:self.left.n_inputs])
args_pos = self.left.n_inputs
for param_name in self.param_names:
kw_value = kwargs.pop(param_name, None)
if kw_value is not None:
value = kw_value
else:
try:
value = args[args_pos]
except IndexError:
raise IndexError("Missing parameter or input")
args_pos += 1
new_args.append(value)
return new_args, kwargs
def _apply_operators_to_value_lists(self, leftval, rightval, **kw):
op = self.op
if op == '+':
return binary_operation(operator.add, leftval, rightval)
elif op == '-':
return binary_operation(operator.sub, leftval, rightval)
elif op == '*':
return binary_operation(operator.mul, leftval, rightval)
elif op == '/':
return binary_operation(operator.truediv, leftval, rightval)
elif op == '**':
return binary_operation(operator.pow, leftval, rightval)
elif op == '&':
if not isinstance(leftval, tuple):
leftval = (leftval,)
if not isinstance(rightval, tuple):
rightval = (rightval,)
return leftval + rightval
elif op in SPECIAL_OPERATORS:
return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval)
else:
            raise ModelDefinitionError(f'Unrecognized operator {op}')
def evaluate(self, *args, **kw):
op = self.op
args, kw = self._get_kwarg_model_parameters_as_positional(args, kw)
left_inputs = self._get_left_inputs_from_args(args)
left_params = self._get_left_params_from_args(args)
if op == 'fix_inputs':
pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs)))
fixed_inputs = {
key if np.issubdtype(type(key), np.integer) else pos_index[key]: value
for key, value in self.right.items()
}
left_inputs = [
fixed_inputs[ind] if ind in fixed_inputs.keys() else inp
for ind, inp in enumerate(left_inputs)
]
leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params))
if op == 'fix_inputs':
return leftval
right_inputs = self._get_right_inputs_from_args(args)
right_params = self._get_right_params_from_args(args)
if op == "|":
if isinstance(leftval, tuple):
return self.right.evaluate(*itertools.chain(leftval, right_params))
else:
return self.right.evaluate(leftval, *right_params)
else:
rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params))
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
@property
def n_submodels(self):
if self._leaflist is None:
self._make_leaflist()
return len(self._leaflist)
@property
def submodel_names(self):
""" Return the names of submodels in a ``CompoundModel``."""
if self._leaflist is None:
self._make_leaflist()
names = [item.name for item in self._leaflist]
nonecount = 0
newnames = []
for item in names:
if item is None:
newnames.append(f'None_{nonecount}')
nonecount += 1
else:
newnames.append(item)
return tuple(newnames)
def both_inverses_exist(self):
"""
        Return True if both members of this compound model have inverses.
"""
import warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"CompoundModel.both_inverses_exist is deprecated. "
"Use has_inverse instead.",
AstropyDeprecationWarning
)
try:
self.left.inverse
self.right.inverse
except NotImplementedError:
return False
return True
def _pre_evaluate(self, *args, **kwargs):
"""
CompoundModel specific input setup that needs to occur prior to
model evaluation.
Note
----
All of the _pre_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
# If equivalencies are provided, necessary to map parameters and pass
# the leaflist as a keyword input for use by model evaluation so that
# the compound model input names can be matched to the model input
# names.
if 'equivalencies' in kwargs:
# Restructure to be useful for the individual model lookup
kwargs['inputs_map'] = [(value[0], (value[1], key)) for
key, value in self.inputs_map().items()]
# Setup actual model evaluation method
def evaluate(_inputs):
return self._evaluate(*_inputs, **kwargs)
return evaluate, args, None, kwargs
@property
def _argnames(self):
"""No inputs should be used to determine input_shape when handling compound models"""
return ()
def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):
"""
CompoundModel specific post evaluation processing of outputs
Note
----
All of the _post_evaluate for each component model will be
performed at the time that the individual model is evaluated.
"""
if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1:
return outputs[0]
return outputs
def _evaluate(self, *args, **kw):
op = self.op
if op != 'fix_inputs':
if op != '&':
leftval = self.left(*args, **kw)
if op != '|':
rightval = self.right(*args, **kw)
else:
rightval = None
else:
leftval = self.left(*(args[:self.left.n_inputs]), **kw)
rightval = self.right(*(args[self.left.n_inputs:]), **kw)
if op != "|":
return self._apply_operators_to_value_lists(leftval, rightval, **kw)
elif op == '|':
if isinstance(leftval, tuple):
return self.right(*leftval, **kw)
else:
return self.right(leftval, **kw)
else:
subs = self.right
newargs = list(args)
subinds = []
subvals = []
for key in subs.keys():
if np.issubdtype(type(key), np.integer):
subinds.append(key)
elif isinstance(key, str):
ind = self.left.inputs.index(key)
subinds.append(ind)
subvals.append(subs[key])
# Turn inputs specified in kw into positional indices.
# Names for compound inputs do not propagate to sub models.
kwind = []
kwval = []
for kwkey in list(kw.keys()):
if kwkey in self.inputs:
ind = self.inputs.index(kwkey)
if ind < len(args):
raise ValueError("Keyword argument duplicates "
"positional value supplied.")
kwind.append(ind)
kwval.append(kw[kwkey])
del kw[kwkey]
# Build new argument list
# Append keyword specified args first
if kwind:
kwargs = list(zip(kwind, kwval))
kwargs.sort()
kwindsorted, kwvalsorted = list(zip(*kwargs))
newargs = newargs + list(kwvalsorted)
if subinds:
subargs = list(zip(subinds, subvals))
subargs.sort()
# subindsorted, subvalsorted = list(zip(*subargs))
# The substitutions must be inserted in order
for ind, val in subargs:
newargs.insert(ind, val)
return self.left(*newargs, **kw)
@property
def param_names(self):
""" An ordered list of parameter names."""
return self._param_names
def _make_leaflist(self):
tdict = {}
leaflist = []
make_subtree_dict(self, '', tdict, leaflist)
self._leaflist = leaflist
self._tdict = tdict
def __getattr__(self, name):
"""
If someone accesses an attribute not already defined, map the
parameters, and then see if the requested attribute is one of
the parameters
"""
# The following test is needed to avoid infinite recursion
# caused by deepcopy. There may be other such cases discovered.
if name == '__setstate__':
raise AttributeError
if name in self._param_names:
return self.__dict__[name]
else:
raise AttributeError(f'Attribute "{name}" not found')
def __getitem__(self, index):
if self._leaflist is None:
self._make_leaflist()
leaflist = self._leaflist
tdict = self._tdict
if isinstance(index, slice):
if index.step:
raise ValueError('Steps in slices not supported '
'for compound models')
if index.start is not None:
if isinstance(index.start, str):
start = self._str_index_to_int(index.start)
else:
start = index.start
else:
start = 0
if index.stop is not None:
if isinstance(index.stop, str):
stop = self._str_index_to_int(index.stop)
else:
stop = index.stop - 1
else:
stop = len(leaflist) - 1
if index.stop == 0:
raise ValueError("Slice endpoint cannot be 0")
if start < 0:
start = len(leaflist) + start
if stop < 0:
stop = len(leaflist) + stop
# now search for matching node:
if stop == start: # only single value, get leaf instead in code below
index = start
else:
for key in tdict:
node, leftind, rightind = tdict[key]
if leftind == start and rightind == stop:
return node
raise IndexError("No appropriate subtree matches slice")
if np.issubdtype(type(index), np.integer):
return leaflist[index]
elif isinstance(index, str):
return leaflist[self._str_index_to_int(index)]
else:
raise TypeError('index must be integer, slice, or model name string')
def _str_index_to_int(self, str_index):
# Search through leaflist for item with that name
found = []
for nleaf, leaf in enumerate(self._leaflist):
if getattr(leaf, 'name', None) == str_index:
found.append(nleaf)
if len(found) == 0:
raise IndexError(f"No component with name '{str_index}' found")
if len(found) > 1:
raise IndexError(f"Multiple components found using '{str_index}' as name\n"
f"at indices {found}")
return found[0]
@property
def n_inputs(self):
""" The number of inputs of a model."""
return self._n_inputs
@n_inputs.setter
def n_inputs(self, value):
self._n_inputs = value
@property
def n_outputs(self):
""" The number of outputs of a model."""
return self._n_outputs
@n_outputs.setter
def n_outputs(self, value):
self._n_outputs = value
@property
def eqcons(self):
return self._eqcons
@eqcons.setter
def eqcons(self, value):
self._eqcons = value
@property
def ineqcons(self):
        return self._ineqcons
@ineqcons.setter
def ineqcons(self, value):
        self._ineqcons = value
def traverse_postorder(self, include_operator=False):
""" Postorder traversal of the CompoundModel tree."""
res = []
if isinstance(self.left, CompoundModel):
res = res + self.left.traverse_postorder(include_operator)
else:
res = res + [self.left]
if isinstance(self.right, CompoundModel):
res = res + self.right.traverse_postorder(include_operator)
else:
res = res + [self.right]
if include_operator:
res.append(self.op)
else:
res.append(self)
return res
def _format_expression(self, format_leaf=None):
leaf_idx = 0
operands = deque()
if format_leaf is None:
format_leaf = lambda i, l: f'[{i}]' # noqa: E731
for node in self.traverse_postorder():
if not isinstance(node, CompoundModel):
operands.append(format_leaf(leaf_idx, node))
leaf_idx += 1
continue
right = operands.pop()
left = operands.pop()
if node.op in OPERATOR_PRECEDENCE:
oper_order = OPERATOR_PRECEDENCE[node.op]
if isinstance(node, CompoundModel):
if (isinstance(node.left, CompoundModel) and
OPERATOR_PRECEDENCE[node.left.op] < oper_order):
left = f'({left})'
if (isinstance(node.right, CompoundModel) and
OPERATOR_PRECEDENCE[node.right.op] < oper_order):
right = f'({right})'
operands.append(' '.join((left, node.op, right)))
else:
left = f'(({left}),'
right = f'({right}))'
operands.append(' '.join((node.op[0], left, right)))
return ''.join(operands)
def _format_components(self):
if self._parameters_ is None:
self._map_parameters()
return "\n\n".join(f"[{idx}]: {m!r}"
for idx, m in enumerate(self._leaflist))
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return super()._format_str(keywords=keywords)
def rename(self, name):
self.name = name
return self
@property
def isleaf(self):
return False
@property
def inverse(self):
if self.op == '|':
return self.right.inverse | self.left.inverse
elif self.op == '&':
return self.left.inverse & self.right.inverse
else:
return NotImplemented
@property
def fittable(self):
""" Set the fittable attribute on a compound model."""
if self._fittable is None:
if self._leaflist is None:
self._map_parameters()
self._fittable = all(m.fittable for m in self._leaflist)
return self._fittable
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
def _map_parameters(self):
"""
Map all the constituent model parameters to the compound object,
renaming as necessary by appending a suffix number.
This can be an expensive operation, particularly for a complex
expression tree.
All the corresponding parameter attributes are created that one
expects for the Model class.
        The parameter objects that the attributes point to are the same
        objects as in the constituent models. Changes made to parameter
        values in either are seen by both.
Prior to calling this, none of the associated attributes will
exist. This method must be called to make the model usable by
fitting engines.
If oldnames=True, then parameters are named as in the original
implementation of compound models.
"""
if self._parameters is not None:
# do nothing
return
if self._leaflist is None:
self._make_leaflist()
self._parameters_ = {}
param_map = {}
self._param_names = []
for lindex, leaf in enumerate(self._leaflist):
if not isinstance(leaf, dict):
for param_name in leaf.param_names:
param = getattr(leaf, param_name)
new_param_name = f"{param_name}_{lindex}"
self.__dict__[new_param_name] = param
self._parameters_[new_param_name] = param
self._param_names.append(new_param_name)
param_map[new_param_name] = (lindex, param_name)
self._param_metrics = {}
self._param_map = param_map
self._param_map_inverse = {v: k for k, v in param_map.items()}
self._initialize_slices()
self._param_names = tuple(self._param_names)
def _initialize_slices(self):
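        # Record, for every parameter, the slice/shape/size it occupies in a
        # single flattened parameter array, then allocate that array; the
        # actual values are synchronized with the constituent models elsewhere.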
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
param = getattr(self, name)
value = param.value
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name] = {}
param_metrics[name]['slice'] = param_slice
param_metrics[name]['shape'] = param_shape
param_metrics[name]['size'] = param_size
total_size += param_size
self._parameters = np.empty(total_size, dtype=np.float64)
@staticmethod
def _recursive_lookup(branch, adict, key):
if isinstance(branch, CompoundModel):
return adict[key]
return branch, key
def inputs_map(self):
"""
        Map the names of the inputs to this CompoundModel to the inputs of the leaf models.
"""
inputs_map = {}
if not isinstance(self.op, str): # If we don't have an operator the mapping is trivial
return {inp: (self, inp) for inp in self.inputs}
elif self.op == '|':
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
elif self.op == '&':
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
if isinstance(self.right, CompoundModel):
r_inputs_map = self.right.inputs_map()
for i, inp in enumerate(self.inputs):
if i < len(self.left.inputs): # Get from left
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[self.left.inputs[i]]
else:
inputs_map[inp] = self.left, self.left.inputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
inputs_map[inp] = r_inputs_map[self.right.inputs[i - len(self.left.inputs)]]
else:
inputs_map[inp] = self.right, self.right.inputs[i - len(self.left.inputs)]
elif self.op == 'fix_inputs':
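            # The fixed inputs (self.right) may be keyed by input name or by
            # position; normalize them to indices so they can be excluded from
            # the map of the remaining free inputs.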
fixed_ind = list(self.right.keys())
ind = [list(self.left.inputs).index(i) if isinstance(i, str) else i for i in fixed_ind]
inp_ind = list(range(self.left.n_inputs))
for i in ind:
inp_ind.remove(i)
for i in inp_ind:
inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i]
else:
if isinstance(self.left, CompoundModel):
l_inputs_map = self.left.inputs_map()
for inp in self.left.inputs:
if isinstance(self.left, CompoundModel):
inputs_map[inp] = l_inputs_map[inp]
else:
inputs_map[inp] = self.left, inp
return inputs_map
def _parameter_units_for_data_units(self, input_units, output_units):
if self._leaflist is None:
self._map_parameters()
units_for_data = {}
for imodel, model in enumerate(self._leaflist):
units_for_data_leaf = model._parameter_units_for_data_units(input_units, output_units)
for param_leaf in units_for_data_leaf:
param = self._param_map_inverse[(imodel, param_leaf)]
units_for_data[param] = units_for_data_leaf[param_leaf]
return units_for_data
@property
def input_units(self):
inputs_map = self.inputs_map()
input_units_dict = {key: inputs_map[key][0].input_units[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units is not None}
if input_units_dict:
return input_units_dict
return None
@property
def input_units_equivalencies(self):
inputs_map = self.inputs_map()
input_units_equivalencies_dict = {
key: inputs_map[key][0].input_units_equivalencies[orig_key]
for key, (mod, orig_key) in inputs_map.items()
if inputs_map[key][0].input_units_equivalencies is not None
}
if not input_units_equivalencies_dict:
return None
return input_units_equivalencies_dict
@property
def input_units_allow_dimensionless(self):
inputs_map = self.inputs_map()
return {key: inputs_map[key][0].input_units_allow_dimensionless[orig_key]
for key, (mod, orig_key) in inputs_map.items()}
@property
def input_units_strict(self):
inputs_map = self.inputs_map()
return {key: inputs_map[key][0].input_units_strict[orig_key]
for key, (mod, orig_key) in inputs_map.items()}
@property
def return_units(self):
outputs_map = self.outputs_map()
return {key: outputs_map[key][0].return_units[orig_key]
for key, (mod, orig_key) in outputs_map.items()
if outputs_map[key][0].return_units is not None}
def outputs_map(self):
"""
        Map the names of the outputs to this CompoundModel to the outputs of the leaf models.
"""
outputs_map = {}
if not isinstance(self.op, str): # If we don't have an operator the mapping is trivial
return {out: (self, out) for out in self.outputs}
elif self.op == '|':
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for out in self.outputs:
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[out]
else:
outputs_map[out] = self.right, out
elif self.op == '&':
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
if isinstance(self.right, CompoundModel):
r_outputs_map = self.right.outputs_map()
for i, out in enumerate(self.outputs):
if i < len(self.left.outputs): # Get from left
if isinstance(self.left, CompoundModel):
outputs_map[out] = l_outputs_map[self.left.outputs[i]]
else:
outputs_map[out] = self.left, self.left.outputs[i]
else: # Get from right
if isinstance(self.right, CompoundModel):
outputs_map[out] = r_outputs_map[self.right.outputs[
i - len(self.left.outputs)]]
else:
outputs_map[out] = self.right, self.right.outputs[
i - len(self.left.outputs)]
elif self.op == 'fix_inputs':
return self.left.outputs_map()
else:
if isinstance(self.left, CompoundModel):
l_outputs_map = self.left.outputs_map()
for out in self.left.outputs:
if isinstance(self.left, CompoundModel):
                    outputs_map[out] = l_outputs_map[out]
else:
outputs_map[out] = self.left, out
return outputs_map
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of
the returned array. If this is not provided (or None), the model
will be evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be
passed.
Raises
------
ValueError
            If ``coords`` are not given and the `Model.bounding_box` of
this model is not set.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = self.get_bounding_box()
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError('If no bounding_box is set, '
'coords or out must be input.')
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError('inconsistent shape of the output.')
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out)
if out.ndim != ndim:
raise ValueError('the array and model must have the same '
'number of dimensions.')
if bbox is not None:
# Assures position is at center pixel, important when using
# add_array.
pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos)
for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
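                # np.mgrid returns axes in index (reversed) order; flip them so
                # the coordinate arrays are passed to the model in input order.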
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
'The `bounding_box` is larger than the input out in '
'one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
def replace_submodel(self, name, model):
"""
Construct a new `~astropy.modeling.CompoundModel` instance from an
existing CompoundModel, replacing the named submodel with a new model.
In order to ensure that inverses and names are kept/reconstructed, it's
necessary to rebuild the CompoundModel from the replaced node all the
way back to the base. The original CompoundModel is left untouched.
Parameters
----------
name : str
name of submodel to be replaced
model : `~astropy.modeling.Model`
replacement model
"""
submodels = [m for m in self.traverse_postorder()
if getattr(m, 'name', None) == name]
if submodels:
if len(submodels) > 1:
raise ValueError(f"More than one submodel named {name}")
old_model = submodels.pop()
if len(old_model) != len(model):
raise ValueError("New and old models must have equal values "
"for n_models")
# Do this check first in order to raise a more helpful Exception,
# although it would fail trying to construct the new CompoundModel
if (old_model.n_inputs != model.n_inputs or
old_model.n_outputs != model.n_outputs):
raise ValueError("New model must match numbers of inputs and "
"outputs of existing model")
tree = _get_submodel_path(self, name)
while tree:
branch = self.copy()
for node in tree[:-1]:
branch = getattr(branch, node)
setattr(branch, tree[-1], model)
model = CompoundModel(branch.op, branch.left, branch.right,
name=branch.name)
tree = tree[:-1]
return model
else:
raise ValueError(f"No submodels found named {name}")
def _set_sub_models_and_parameter_units(self, left, right):
"""
        Provides a work-around to properly set the sub-models and their
        parameters' units/values when using the ``without_units_for_data``
        or ``with_units_from_data`` methods.
"""
model = CompoundModel(self.op, left, right)
self.left = left
self.right = right
for name in model.param_names:
model_parameter = getattr(model, name)
parameter = getattr(self, name)
parameter.value = model_parameter.value
parameter._set_unit(model_parameter.unit, force=True)
def without_units_for_data(self, **kwargs):
"""
See `~astropy.modeling.Model.without_units_for_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
        units. This is only valid for compound models built with the * and /
        operators, as in that case it is reasonable to mix the output
        units. It does this
by modifying the output units of each sub model by using the output
units of the other sub model so that we can apply the original function
and get the desired result.
Additional data has to be output in the mixed output unit case
so that the units can be properly rebuilt by
`~astropy.modeling.CompoundModel.with_units_from_data`.
Outside the mixed output units, this method is identical to the
base method.
"""
if self.op in ['*', '/']:
model = self.copy()
inputs = {inp: kwargs[inp] for inp in self.inputs}
left_units = self.left.output_units(**kwargs)
right_units = self.right.output_units(**kwargs)
if self.op == '*':
left_kwargs = {out: kwargs[out] / right_units[out]
for out in self.left.outputs if kwargs[out] is not None}
right_kwargs = {out: kwargs[out] / left_units[out]
for out in self.right.outputs if kwargs[out] is not None}
else:
left_kwargs = {out: kwargs[out] * right_units[out]
for out in self.left.outputs if kwargs[out] is not None}
right_kwargs = {out: 1 / kwargs[out] * left_units[out]
for out in self.right.outputs if kwargs[out] is not None}
left_kwargs.update(inputs.copy())
right_kwargs.update(inputs.copy())
left = self.left.without_units_for_data(**left_kwargs)
if isinstance(left, tuple):
left_kwargs['_left_kwargs'] = left[1]
left_kwargs['_right_kwargs'] = left[2]
left = left[0]
right = self.right.without_units_for_data(**right_kwargs)
if isinstance(right, tuple):
right_kwargs['_left_kwargs'] = right[1]
right_kwargs['_right_kwargs'] = right[2]
right = right[0]
model._set_sub_models_and_parameter_units(left, right)
return model, left_kwargs, right_kwargs
else:
return super().without_units_for_data(**kwargs)
def with_units_from_data(self, **kwargs):
"""
See `~astropy.modeling.Model.with_units_from_data` for overview
of this method.
Notes
-----
This modifies the behavior of the base method to account for the
case where the sub-models of a compound model have different output
        units. This is only valid for compound models built with the * and /
        operators, as in that case it is reasonable to mix the output units.
        In order to
do this it requires some additional information output by
`~astropy.modeling.CompoundModel.without_units_for_data` passed as
keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``.
Outside the mixed output units, this method is identical to the
base method.
"""
if self.op in ['*', '/']:
left_kwargs = kwargs.pop('_left_kwargs')
right_kwargs = kwargs.pop('_right_kwargs')
left = self.left.with_units_from_data(**left_kwargs)
right = self.right.with_units_from_data(**right_kwargs)
model = self.copy()
model._set_sub_models_and_parameter_units(left, right)
return model
else:
return super().with_units_from_data(**kwargs)
def _get_submodel_path(model, name):
"""Find the route down a CompoundModel's tree to the model with the
specified name (whether it's a leaf or not)"""
if getattr(model, 'name', None) == name:
return []
try:
return ['left'] + _get_submodel_path(model.left, name)
except (AttributeError, TypeError):
pass
try:
return ['right'] + _get_submodel_path(model.right, name)
except (AttributeError, TypeError):
pass
def binary_operation(binoperator, left, right):
'''
Perform binary operation. Operands may be matching tuples of operands.
'''
if isinstance(left, tuple) and isinstance(right, tuple):
return tuple(binoperator(item[0], item[1])
for item in zip(left, right))
return binoperator(left, right)
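# Illustrative sketch: when both operands are tuples the operator is applied
# pairwise, e.g. binary_operation(operator.add, (1, 2), (3, 4)) -> (4, 6)
# (using the stdlib ``operator`` module); otherwise it reduces to a plain
# binoperator(left, right) call.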
def get_ops(tree, opset):
"""
Recursive function to collect operators used.
"""
if isinstance(tree, CompoundModel):
opset.add(tree.op)
get_ops(tree.left, opset)
get_ops(tree.right, opset)
else:
return
def make_subtree_dict(tree, nodepath, tdict, leaflist):
'''
Traverse a tree noting each node by a key that indicates all the
left/right choices necessary to reach that node. Each key will
reference a tuple that contains:
- reference to the compound model for that node.
    - leftmost index contained within that subtree
      (relative to all indices for the whole tree)
    - rightmost index contained within that subtree
'''
# if this is a leaf, just append it to the leaflist
if not hasattr(tree, 'isleaf'):
leaflist.append(tree)
else:
leftmostind = len(leaflist)
make_subtree_dict(tree.left, nodepath+'l', tdict, leaflist)
make_subtree_dict(tree.right, nodepath+'r', tdict, leaflist)
rightmostind = len(leaflist)-1
tdict[nodepath] = (tree, leftmostind, rightmostind)
_ORDER_OF_OPERATORS = [('fix_inputs',), ('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
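# Resulting precedence, from loosest to tightest binding:
#     'fix_inputs' < '|' < '&' < '+', '-' < '*', '/' < '**'
# _format_expression consults these ranks to decide where parentheses are
# needed when printing a compound expression.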
def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None):
"""
This function creates a compound model with one or more of the input
values of the input model assigned fixed values (scalar or array).
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
        The model for which one or more of the
        input values will be fixed to some constant value.
values : dict
A dictionary where the key identifies which input to fix
and its value is the value to fix it at. The key may either be the
name of the input or a number reflecting its order in the inputs.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> g = Gaussian2D(1, 2, 3, 4, 5)
>>> gv = fix_inputs(g, {0: 2.5})
Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y)
"""
model = CompoundModel('fix_inputs', modelinstance, values)
if bounding_boxes is not None:
if selector_args is None:
selector_args = tuple((key, True) for key in values.keys())
bbox = CompoundBoundingBox.validate(modelinstance, bounding_boxes, selector_args)
_selector = bbox.selector_args.get_fixed_values(modelinstance, values)
new_bbox = bbox[_selector]
new_bbox = new_bbox.__class__.validate(model, new_bbox)
model.bounding_box = new_bbox
return model
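# Illustrative sketch: inputs can also be fixed by name rather than position,
# e.g. fix_inputs(Gaussian2D(1, 2, 3, 4, 5), {'x': 2.5}) is equivalent to the
# index-based form shown in the docstring above.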
def bind_bounding_box(modelinstance, bounding_box, ignored=None, order='C'):
"""
Set a validated bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated bounding box will be set on.
bounding_box : tuple
A bounding box tuple, see :ref:`astropy:bounding-boxes` for details
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = ModelBoundingBox.validate(modelinstance,
bounding_box,
ignored=ignored,
order=order)
def bind_compound_bounding_box(modelinstance, bounding_boxes, selector_args,
create_selector=None, ignored=None, order='C'):
"""
Add a validated compound bounding box to a model instance.
Parameters
----------
modelinstance : `~astropy.modeling.Model` instance
This is the model that the validated compound bounding box will be set on.
bounding_boxes : dict
A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`
for details.
selector_args : list
List of selector argument tuples to define selection for compound
bounding box, see :ref:`astropy:bounding-boxes` for details.
create_selector : callable, optional
An optional callable with interface (selector_value, model) which
can generate a bounding box based on a selector value and model if
there is no bounding box in the compound bounding box listed under
that selector value. Default is ``None``, meaning new bounding
box entries will not be automatically generated.
ignored : list
List of the inputs to be ignored by the bounding box.
order : str, optional
The ordering of the bounding box tuple, can be either ``'C'`` or
``'F'``.
"""
modelinstance.bounding_box = CompoundBoundingBox.validate(modelinstance,
bounding_boxes, selector_args,
create_selector=create_selector,
ignored=ignored,
order=order)
def custom_model(*args, fit_deriv=None):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
The model is separable only if there is a single input.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
The standard settable model properties can be configured by default
using keyword arguments matching the name of the property; however,
these values are not set as model "parameters". Moreover, users
cannot use keyword arguments matching non-settable model properties,
with the exception of ``n_outputs`` which should be set to the number of
outputs of your function.
Parameters
----------
func : function
Function which defines the model. It should take N positional
        arguments where ``N`` is the number of dimensions of the model (the
        number of independent variables in the model), and any number of
        keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25)
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
f"{__name__} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any).")
def _custom_model_inputs(func):
"""
Processes the inputs to the `custom_model`'s function into the appropriate
categories.
Parameters
----------
func : callable
Returns
-------
inputs : list
list of evaluation inputs
special_params : dict
dictionary of model properties which require special treatment
settable_params : dict
dictionary of defaults for settable model properties
params : dict
dictionary of model parameters set by `custom_model`'s function
"""
inputs, parameters = get_inputs_and_params(func)
special = ['n_outputs']
settable = [attr for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is not None]
properties = [attr for attr, value in vars(Model).items()
if isinstance(value, property) and value.fset is None and attr not in special]
special_params = {}
settable_params = {}
params = {}
for param in parameters:
if param.name in special:
special_params[param.name] = param.default
elif param.name in settable:
settable_params[param.name] = param.default
elif param.name in properties:
raise ValueError(f"Parameter '{param.name}' cannot be a model property: {properties}.")
else:
params[param.name] = param.default
return inputs, special_params, settable_params, params
def _custom_model_wrapper(func, fit_deriv=None):
"""
    Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable "
"object")
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other "
"callable object")
model_name = func.__name__
inputs, special_params, settable_params, params = _custom_model_inputs(func)
if (fit_deriv is not None and
len(fit_deriv.__defaults__) != len(params)):
raise ModelDefinitionError("derivative function should accept "
"same number of parameters as func.")
params = {param: Parameter(param, default=default)
for param, default in params.items()}
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
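    # Assemble the namespace of the model class created below: the user
    # function becomes the static ``evaluate`` method, its keyword arguments
    # become ``Parameter`` descriptors (added via ``members.update(params)``),
    # and ``n_inputs``/``n_outputs`` come from the function signature.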
members = {
'__module__': str(modname),
'__doc__': func.__doc__,
'n_inputs': len(inputs),
'n_outputs': special_params.pop('n_outputs', 1),
'evaluate': staticmethod(func),
'_settable_properties': settable_params
}
if fit_deriv is not None:
members['fit_deriv'] = staticmethod(fit_deriv)
members.update(params)
cls = type(model_name, (FittableModel,), members)
    cls._separable = len(inputs) == 1
return cls
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``arr[coords] == arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from
``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`astropy:bounding-boxes`
"""
bbox = model.bounding_box
    if (coords is None) and (arr is None) and (bbox is None):
        raise ValueError('If no bounding_box is set, '
                         'coords or arr must be input.')
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError('number of array dimensions inconsistent with '
'number of model inputs.')
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError('coordinate length inconsistent with the number '
'of model inputs.')
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError('coordinate shape inconsistent with the '
'array shape.')
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos)
for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError('The `bounding_box` is larger than the input'
' arr in one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
def hide_inverse(model):
"""
This is a convenience function intended to disable automatic generation
of the inverse in compound models by disabling one of the constituent
    model's inverse. This is to handle cases where user-provided inverse
functions are not compatible within an expression.
Example:
compound_model.inverse = hide_inverse(m1) + m2 + m3
    This will ensure that the defined inverse itself won't attempt to
    build its own inverse, which would otherwise fail in this example
    (e.g., m = m1 + m2 + m3 happens to raise an exception for this
    reason).
Note that this permanently disables it. To prevent that either copy
the model or restore the inverse later.
"""
del model.inverse
return model
|
3f6cb456d23cf03b918fb81d9353e0786d024ef04d3dddfa4281a599efa31b0b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
Optimization algorithms used in `~astropy.modeling.fitting`.
"""
import abc
import warnings
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["Optimization", "SLSQP", "Simplex"]
# Maximum number of iterations
DEFAULT_MAXITER = 100
# Step for the forward difference approximation of the Jacobian
DEFAULT_EPS = np.sqrt(np.finfo(float).eps)
# Default requested accuracy
DEFAULT_ACC = 1e-07
DEFAULT_BOUNDS = (-10 ** 12, 10 ** 12)
class Optimization(metaclass=abc.ABCMeta):
"""
Base class for optimizers.
Parameters
----------
opt_method : callable
Implements optimization method
Notes
-----
The base Optimizer does not support any constraints by default; individual
optimizers should explicitly set this list to the specific constraints
    they support.
"""
supported_constraints = []
def __init__(self, opt_method):
self._opt_method = opt_method
self._maxiter = DEFAULT_MAXITER
self._eps = DEFAULT_EPS
self._acc = DEFAULT_ACC
@property
def maxiter(self):
"""Maximum number of iterations"""
return self._maxiter
@maxiter.setter
def maxiter(self, val):
"""Set maxiter"""
self._maxiter = val
@property
def eps(self):
"""Step for the forward difference approximation of the Jacobian"""
return self._eps
@eps.setter
def eps(self, val):
"""Set eps value"""
self._eps = val
@property
def acc(self):
"""Requested accuracy"""
return self._acc
@acc.setter
def acc(self, val):
"""Set accuracy"""
self._acc = val
def __repr__(self):
fmt = f"{self.__class__.__name__}()"
return fmt
@property
def opt_method(self):
""" Return the optimization method."""
return self._opt_method
@abc.abstractmethod
def __call__(self):
raise NotImplementedError("Subclasses should implement this method")
class SLSQP(Optimization):
"""
Sequential Least Squares Programming optimization algorithm.
The algorithm is described in [1]_. It supports tied and fixed
parameters, as well as bounded constraints. Uses
`scipy.optimize.fmin_slsqp`.
References
----------
.. [1] http://www.netlib.org/toms/733
"""
supported_constraints = ['bounds', 'eqcons', 'ineqcons', 'fixed', 'tied']
def __init__(self):
from scipy.optimize import fmin_slsqp
super().__init__(fmin_slsqp)
self.fit_info = {
'final_func_val': None,
'numiter': None,
'exit_mode': None,
'message': None
}
def __call__(self, objfunc, initval, fargs, **kwargs):
"""
Run the solver.
Parameters
----------
objfunc : callable
            objective function
initval : iterable
initial guess for the parameter values
fargs : tuple
other arguments to be passed to the statistic function
kwargs : dict
other keyword arguments to be passed to the solver
"""
kwargs['iter'] = kwargs.pop('maxiter', self._maxiter)
if 'epsilon' not in kwargs:
kwargs['epsilon'] = self._eps
if 'acc' not in kwargs:
kwargs['acc'] = self._acc
# Get the verbosity level
disp = kwargs.pop('verblevel', None)
# set the values of constraints to match the requirements of fmin_slsqp
model = fargs[0]
pars = [getattr(model, name) for name in model.param_names]
bounds = [par.bounds for par in pars if not (par.fixed or par.tied)]
bounds = np.asarray(bounds)
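        # Replace open-ended (None) bounds with the large finite
        # DEFAULT_BOUNDS values so that fmin_slsqp receives numeric limits.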
for i in bounds:
if i[0] is None:
i[0] = DEFAULT_BOUNDS[0]
if i[1] is None:
i[1] = DEFAULT_BOUNDS[1]
# older versions of scipy require this array to be float
bounds = np.asarray(bounds, dtype=float)
eqcons = np.array(model.eqcons)
ineqcons = np.array(model.ineqcons)
fitparams, final_func_val, numiter, exit_mode, mess = self.opt_method(
objfunc, initval, args=fargs, full_output=True, disp=disp,
bounds=bounds, eqcons=eqcons, ieqcons=ineqcons,
**kwargs)
self.fit_info['final_func_val'] = final_func_val
self.fit_info['numiter'] = numiter
self.fit_info['exit_mode'] = exit_mode
self.fit_info['message'] = mess
if exit_mode != 0:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
return fitparams, self.fit_info
class Simplex(Optimization):
"""
    Nelder-Mead (downhill simplex) algorithm.
This algorithm [1]_ only uses function values, not derivatives.
Uses `scipy.optimize.fmin`.
References
----------
.. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
"""
supported_constraints = ['bounds', 'fixed', 'tied']
def __init__(self):
from scipy.optimize import fmin as simplex
super().__init__(simplex)
self.fit_info = {
'final_func_val': None,
'numiter': None,
'exit_mode': None,
'num_function_calls': None
}
def __call__(self, objfunc, initval, fargs, **kwargs):
"""
Run the solver.
Parameters
----------
objfunc : callable
            objective function
initval : iterable
initial guess for the parameter values
fargs : tuple
other arguments to be passed to the statistic function
kwargs : dict
other keyword arguments to be passed to the solver
"""
if 'maxiter' not in kwargs:
kwargs['maxiter'] = self._maxiter
if 'acc' in kwargs:
self._acc = kwargs['acc']
kwargs.pop('acc')
if 'xtol' in kwargs:
self._acc = kwargs['xtol']
kwargs.pop('xtol')
# Get the verbosity level
disp = kwargs.pop('verblevel', None)
fitparams, final_func_val, numiter, funcalls, exit_mode = self.opt_method(
objfunc, initval, args=fargs, xtol=self._acc, disp=disp,
full_output=True, **kwargs)
self.fit_info['final_func_val'] = final_func_val
self.fit_info['numiter'] = numiter
self.fit_info['exit_mode'] = exit_mode
self.fit_info['num_function_calls'] = funcalls
if self.fit_info['exit_mode'] == 1:
warnings.warn("The fit may be unsuccessful; "
"Maximum number of function evaluations reached.",
AstropyUserWarning)
elif self.fit_info['exit_mode'] == 2:
warnings.warn("The fit may be unsuccessful; "
"Maximum number of iterations reached.",
AstropyUserWarning)
return fitparams, self.fit_info
|
4654aaf9bef6a41064355277f3f55a3d0be8dbac78af3234e79a5fa0c4c21ec9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module is to contain an improved bounding box
"""
import abc
import copy
import warnings
from collections import namedtuple
from typing import Any, Callable, Dict, List, Tuple
import numpy as np
from astropy.units import Quantity
from astropy.utils import isiterable
__all__ = ['ModelBoundingBox', 'CompoundBoundingBox']
_BaseInterval = namedtuple('_BaseInterval', "lower upper")
class _Interval(_BaseInterval):
"""
A single input's bounding box interval.
Parameters
----------
lower : float
The lower bound of the interval
upper : float
The upper bound of the interval
Methods
-------
validate :
        Constructs a valid interval
outside :
Determine which parts of an input array are outside the interval.
domain :
        Constructs a discretization of the points inside the interval.
"""
def __repr__(self):
return f"Interval(lower={self.lower}, upper={self.upper})"
def copy(self):
return copy.deepcopy(self)
@staticmethod
def _validate_shape(interval):
"""Validate the shape of an interval representation"""
MESSAGE = """An interval must be some sort of sequence of length 2"""
try:
shape = np.shape(interval)
except TypeError:
try:
# np.shape does not work with lists of Quantities
if len(interval) == 1:
interval = interval[0]
shape = np.shape([b.to_value() for b in interval])
except (ValueError, TypeError, AttributeError):
raise ValueError(MESSAGE)
valid_shape = shape in ((2,), (1, 2), (2, 0))
if not valid_shape:
valid_shape = (len(shape) > 0 and shape[0] == 2 and
all(isinstance(b, np.ndarray) for b in interval))
if not isiterable(interval) or not valid_shape:
raise ValueError(MESSAGE)
@classmethod
def _validate_bounds(cls, lower, upper):
"""Validate the bounds are reasonable and construct an interval from them."""
if (np.asanyarray(lower) > np.asanyarray(upper)).all():
warnings.warn(f"Invalid interval: upper bound {upper} "
f"is strictly less than lower bound {lower}.", RuntimeWarning)
return cls(lower, upper)
@classmethod
def validate(cls, interval):
"""
Construct and validate an interval
Parameters
----------
interval : iterable
A representation of the interval.
Returns
-------
A validated interval.
"""
cls._validate_shape(interval)
if len(interval) == 1:
interval = tuple(interval[0])
else:
interval = tuple(interval)
return cls._validate_bounds(interval[0], interval[1])
def outside(self, _input: np.ndarray):
"""
Parameters
----------
_input : np.ndarray
The evaluation input in the form of an array.
Returns
-------
Boolean array indicating which parts of _input are outside the interval:
True -> position outside interval
False -> position inside interval
"""
return np.logical_or(_input < self.lower, _input > self.upper)
def domain(self, resolution):
return np.arange(self.lower, self.upper + resolution, resolution)
# The interval where all ignored inputs can be found.
_ignored_interval = _Interval.validate((-np.inf, np.inf))
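# Illustrative sketch of the _Interval API (values follow from the methods
# above):
#     iv = _Interval.validate((1, 2))
#     iv.outside(np.array([0.5, 1.5, 2.5]))  # -> array([ True, False,  True])
#     iv.domain(0.5)                         # -> array([1. , 1.5, 2. ])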
def get_index(model, key) -> int:
"""
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself.
"""
if isinstance(key, str):
if key in model.inputs:
index = model.inputs.index(key)
else:
raise ValueError(f"'{key}' is not one of the inputs: {model.inputs}.")
elif np.issubdtype(type(key), np.integer):
if 0 <= key < len(model.inputs):
index = key
else:
raise IndexError(f"Integer key: {key} must be non-negative and < {len(model.inputs)}.")
else:
raise ValueError(f"Key value: {key} must be string or integer.")
return index
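# Illustrative note: for a model with ``inputs = ('x', 'y')`` both key forms
# resolve to the same index, e.g. get_index(model, 'y') == get_index(model, 1)
# == 1; any other key type raises ValueError.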
def get_name(model, index: int):
"""Get the input name corresponding to the input index"""
return model.inputs[index]
class _BoundingDomain(abc.ABC):
"""
Base class for ModelBoundingBox and CompoundBoundingBox.
This is where all the `~astropy.modeling.core.Model` evaluation
code for evaluating with a bounding box is because it is common
to both types of bounding box.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this bounding domain is for.
prepare_inputs :
Generates the necessary input information so that model can
be evaluated only for input points entirely inside bounding_box.
This needs to be implemented by a subclass. Note that most of
the implementation is in ModelBoundingBox.
prepare_outputs :
Fills the output values in for any input points outside the
bounding_box.
evaluate :
Performs a complete model evaluation while enforcing the bounds
on the inputs and returns a complete output.
"""
def __init__(self, model, ignored: List[int] = None, order: str = 'C'):
self._model = model
self._ignored = self._validate_ignored(ignored)
self._order = self._get_order(order)
@property
def model(self):
return self._model
@property
def order(self) -> str:
return self._order
@property
def ignored(self) -> List[int]:
return self._ignored
def _get_order(self, order: str = None) -> str:
"""
        Determine whether the bounding_box is C/Python ordered or
        Fortran/mathematically ordered.
"""
if order is None:
order = self._order
if order not in ('C', 'F'):
raise ValueError("order must be either 'C' (C/python order) or "
f"'F' (Fortran/mathematical order), got: {order}.")
return order
def _get_index(self, key) -> int:
"""
Get the input index corresponding to the given key.
Can pass in either:
the string name of the input or
the input index itself.
"""
return get_index(self._model, key)
def _get_name(self, index: int):
"""Get the input name corresponding to the input index"""
return get_name(self._model, index)
@property
def ignored_inputs(self) -> List[str]:
return [self._get_name(index) for index in self._ignored]
def _validate_ignored(self, ignored: list) -> List[int]:
if ignored is None:
return []
else:
return [self._get_index(key) for key in ignored]
def __call__(self, *args, **kwargs):
raise NotImplementedError(
"This bounding box is fixed by the model and does not have "
"adjustable parameters.")
@abc.abstractmethod
def fix_inputs(self, model, fixed_inputs: dict):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
"""
raise NotImplementedError("This should be implemented by a child class.")
@abc.abstractmethod
def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]:
"""
        Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
raise NotImplementedError("This has not been implemented for BoundingDomain.")
@staticmethod
def _base_output(input_shape, fill_value):
"""
Create a baseline output, assuming that the entire input is outside
the bounding box
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
An array of the correct shape containing all fill_value
"""
return np.zeros(input_shape) + fill_value
def _all_out_output(self, input_shape, fill_value):
"""
Create output if all inputs are outside the domain
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
        A full set of outputs for the case that all inputs are outside the domain.
"""
return [self._base_output(input_shape, fill_value)
for _ in range(self._model.n_outputs)], None
def _modify_output(self, valid_output, valid_index, input_shape, fill_value):
"""
For a single output fill in all the parts corresponding to inputs
outside the bounding box.
Parameters
----------
valid_output : numpy array
The output from the model corresponding to inputs inside the
bounding box
valid_index : numpy array
array of all indices of inputs inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
An output array with all the indices corresponding to inputs
outside the bounding box filled in by fill_value
"""
output = self._base_output(input_shape, fill_value)
if not output.shape:
output = np.array(valid_output)
else:
output[valid_index] = valid_output
if np.isscalar(valid_output):
output = output.item(0)
return output
def _prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value):
"""
Fill in all the outputs of the model corresponding to inputs
outside the bounding_box.
Parameters
----------
valid_outputs : list of numpy array
The list of outputs from the model corresponding to inputs
inside the bounding box
valid_index : numpy array
array of all indices of inputs inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
Returns
-------
List of filled in output arrays.
"""
outputs = []
for valid_output in valid_outputs:
outputs.append(self._modify_output(valid_output, valid_index, input_shape, fill_value))
return outputs
def prepare_outputs(self, valid_outputs, valid_index, input_shape, fill_value):
"""
Fill in all the outputs of the model corresponding to inputs
outside the bounding_box, adjusting any single output model so that
        its output becomes a list containing that output.
Parameters
----------
valid_outputs : list
The list of outputs from the model corresponding to inputs
inside the bounding box
valid_index : array_like
array of all indices of inputs inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
"""
if self._model.n_outputs == 1:
valid_outputs = [valid_outputs]
return self._prepare_outputs(valid_outputs, valid_index, input_shape, fill_value)
@staticmethod
def _get_valid_outputs_unit(valid_outputs, with_units: bool):
"""
Get the unit for outputs if one is required.
Parameters
----------
valid_outputs : list of numpy array
The list of outputs from the model corresponding to inputs
inside the bounding box
with_units : bool
whether or not a unit is required
"""
if with_units:
return getattr(valid_outputs, 'unit', None)
def _evaluate_model(self, evaluate: Callable, valid_inputs, valid_index,
input_shape, fill_value, with_units: bool):
"""
Evaluate the model using the given evaluate routine
Parameters
----------
evaluate : Callable
callable which takes in the valid inputs to evaluate model
valid_inputs : list of numpy arrays
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : numpy array
array of all indices inside the bounding box
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
with_units : bool
whether or not a unit is required
Returns
-------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
"""
valid_outputs = evaluate(valid_inputs)
valid_outputs_unit = self._get_valid_outputs_unit(valid_outputs, with_units)
return self.prepare_outputs(valid_outputs, valid_index,
input_shape, fill_value), valid_outputs_unit
def _evaluate(self, evaluate: Callable, inputs, input_shape,
fill_value, with_units: bool):
"""
Perform model evaluation steps:
prepare_inputs -> evaluate -> prepare_outputs
Parameters
----------
evaluate : Callable
callable which takes in the valid inputs to evaluate model
        inputs : list
            List of all the model inputs
        input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
with_units : bool
whether or not a unit is required
Returns
-------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
"""
valid_inputs, valid_index, all_out = self.prepare_inputs(input_shape, inputs)
if all_out:
return self._all_out_output(input_shape, fill_value)
else:
return self._evaluate_model(evaluate, valid_inputs, valid_index,
input_shape, fill_value, with_units)
@staticmethod
def _set_outputs_unit(outputs, valid_outputs_unit):
"""
Set the units on the outputs
prepare_inputs -> evaluate -> prepare_outputs -> set output units
Parameters
----------
outputs :
list containing filled in output values
valid_outputs_unit :
the unit that will be attached to the outputs
Returns
-------
List containing filled in output values and units
"""
if valid_outputs_unit is not None:
return Quantity(outputs, valid_outputs_unit, copy=False)
return outputs
def evaluate(self, evaluate: Callable, inputs, fill_value):
"""
Perform full model evaluation steps:
prepare_inputs -> evaluate -> prepare_outputs -> set output units
Parameters
----------
evaluate : callable
callable which takes in the valid inputs to evaluate model
        inputs : list
            List of all the model inputs
fill_value : float
The value which will be assigned to inputs which are outside
the bounding box
"""
input_shape = self._model.input_shape(inputs)
# NOTE: CompoundModel does not currently support units during
# evaluation for bounding_box so this feature is turned off
# for CompoundModel(s).
outputs, valid_outputs_unit = self._evaluate(evaluate, inputs, input_shape,
fill_value, self._model.bbox_with_units)
return tuple(self._set_outputs_unit(outputs, valid_outputs_unit))
class ModelBoundingBox(_BoundingDomain):
"""
A model's bounding box
Parameters
----------
intervals : dict
A dictionary containing all the intervals for each model input
keys -> input index
values -> interval for that index
model : `~astropy.modeling.Model`
The Model this bounding_box is for.
ignored : list
A list containing all the inputs (index) which will not be
checked for whether or not their elements are in/out of an interval.
    order : str, optional
        The ordering that is assumed for the tuple representation of this
        bounding_box. Options: 'C' (default): C/Python order, e.g. z, y, x;
        'F': Fortran/mathematical notation order, e.g. x, y, z.
"""
def __init__(self, intervals: Dict[int, _Interval], model,
ignored: List[int] = None, order: str = 'C'):
super().__init__(model, ignored, order)
self._intervals = {}
if intervals != () and intervals != {}:
self._validate(intervals, order=order)
def copy(self, ignored=None):
intervals = {index: interval.copy()
for index, interval in self._intervals.items()}
if ignored is None:
ignored = self._ignored.copy()
return ModelBoundingBox(intervals, self._model,
ignored=ignored,
order=self._order)
@property
def intervals(self) -> Dict[int, _Interval]:
"""Return bounding_box labeled using input positions"""
return self._intervals
@property
def named_intervals(self) -> Dict[str, _Interval]:
"""Return bounding_box labeled using input names"""
return {self._get_name(index): bbox for index, bbox in self._intervals.items()}
def __repr__(self):
parts = [
'ModelBoundingBox(',
' intervals={'
]
for name, interval in self.named_intervals.items():
parts.append(f" {name}: {interval}")
parts.append(' }')
if len(self._ignored) > 0:
parts.append(f" ignored={self.ignored_inputs}")
parts.append(f' model={self._model.__class__.__name__}(inputs={self._model.inputs})')
parts.append(f" order='{self._order}'")
parts.append(')')
return '\n'.join(parts)
def __len__(self):
return len(self._intervals)
def __contains__(self, key):
        try:
            index = self._get_index(key)
            return index in self._intervals or index in self._ignored
        except (IndexError, ValueError):
            return False
def has_interval(self, key):
return self._get_index(key) in self._intervals
def __getitem__(self, key):
"""Get bounding_box entries by either input name or input index"""
index = self._get_index(key)
if index in self._ignored:
return _ignored_interval
else:
            return self._intervals[index]
def bounding_box(self, order: str = None):
"""
Return the old tuple of tuples representation of the bounding_box
order='C' corresponds to the old bounding_box ordering
order='F' corresponds to the gwcs bounding_box ordering.
"""
if len(self._intervals) == 1:
return tuple(list(self._intervals.values())[0])
else:
order = self._get_order(order)
inputs = self._model.inputs
if order == 'C':
inputs = inputs[::-1]
bbox = tuple(tuple(self[input_name]) for input_name in inputs)
if len(bbox) == 1:
bbox = bbox[0]
return bbox
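    # Illustrative note: for a model with inputs ('x', 'y') and intervals
    # x: (1, 2), y: (3, 4), ``bounding_box(order='C')`` returns
    # ((3, 4), (1, 2)) (numpy axis order) while ``bounding_box(order='F')``
    # returns ((1, 2), (3, 4)) (input order).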
def __eq__(self, value):
"""Note equality can be either with old representation or new one."""
if isinstance(value, tuple):
return self.bounding_box() == value
elif isinstance(value, ModelBoundingBox):
return (self.intervals == value.intervals) and (self.ignored == value.ignored)
else:
return False
def __setitem__(self, key, value):
"""Validate and store interval under key (input index or input name)."""
index = self._get_index(key)
if index in self._ignored:
self._ignored.remove(index)
self._intervals[index] = _Interval.validate(value)
def __delitem__(self, key):
"""Delete stored interval"""
index = self._get_index(key)
if index in self._ignored:
raise RuntimeError(f"Cannot delete ignored input: {key}!")
del self._intervals[index]
self._ignored.append(index)
def _validate_dict(self, bounding_box: dict):
"""Validate passing dictionary of intervals and setting them."""
for key, value in bounding_box.items():
self[key] = value
@property
def _available_input_index(self):
model_input_index = [self._get_index(_input) for _input in self._model.inputs]
return [_input for _input in model_input_index if _input not in self._ignored]
def _validate_sequence(self, bounding_box, order: str = None):
"""Validate passing tuple of tuples representation (or related) and setting them."""
order = self._get_order(order)
if order == 'C':
# If bounding_box is C/python ordered, it needs to be reversed
# to be in Fortran/mathematical/input order.
bounding_box = bounding_box[::-1]
for index, value in enumerate(bounding_box):
self[self._available_input_index[index]] = value
@property
def _n_inputs(self) -> int:
n_inputs = self._model.n_inputs - len(self._ignored)
if n_inputs > 0:
return n_inputs
else:
return 0
def _validate_iterable(self, bounding_box, order: str = None):
"""Validate and set any iterable representation"""
if len(bounding_box) != self._n_inputs:
raise ValueError(f"Found {len(bounding_box)} intervals, "
f"but must have exactly {self._n_inputs}.")
if isinstance(bounding_box, dict):
self._validate_dict(bounding_box)
else:
self._validate_sequence(bounding_box, order)
def _validate(self, bounding_box, order: str = None):
"""Validate and set any representation"""
if self._n_inputs == 1 and not isinstance(bounding_box, dict):
self[self._available_input_index[0]] = bounding_box
else:
self._validate_iterable(bounding_box, order)
@classmethod
def validate(cls, model, bounding_box,
ignored: list = None, order: str = 'C', _preserve_ignore: bool = False, **kwargs):
"""
Construct a valid bounding box for a model.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be a bounding_box
bounding_box : dict, tuple
A possible representation of the bounding box
        order : str, optional
The order that a tuple representation will be assumed to be
Default: 'C'
"""
if isinstance(bounding_box, ModelBoundingBox):
order = bounding_box.order
if _preserve_ignore:
ignored = bounding_box.ignored
bounding_box = bounding_box.named_intervals
new = cls({}, model, ignored=ignored, order=order)
new._validate(bounding_box)
return new
def fix_inputs(self, model, fixed_inputs: dict, _keep_ignored=False):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
        _keep_ignored : bool
Keep the ignored inputs of the bounding box (internal argument only)
"""
new = self.copy()
for _input in fixed_inputs.keys():
del new[_input]
if _keep_ignored:
ignored = new.ignored
else:
ignored = None
return ModelBoundingBox.validate(model, new.named_intervals,
ignored=ignored, order=new._order)
@property
def dimension(self):
return len(self)
def domain(self, resolution, order: str = None):
inputs = self._model.inputs
order = self._get_order(order)
if order == 'C':
inputs = inputs[::-1]
return [self[input_name].domain(resolution) for input_name in inputs]
def _outside(self, input_shape, inputs):
"""
Get all the input positions which are outside the bounding_box,
so that the corresponding outputs can be filled with the fill
value (default NaN).
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
outside_index : bool-numpy array
True -> position outside bounding_box
False -> position inside bounding_box
all_out : bool
if all of the inputs are outside the bounding_box
"""
all_out = False
outside_index = np.zeros(input_shape, dtype=bool)
for index, _input in enumerate(inputs):
_input = np.asanyarray(_input)
outside = np.broadcast_to(self[index].outside(_input), input_shape)
outside_index[outside] = True
if outside_index.all():
all_out = True
break
return outside_index, all_out
def _valid_index(self, input_shape, inputs):
"""
Get the indices of all the inputs inside the bounding_box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_index : numpy array
array of all indices inside the bounding box
all_out : bool
if all of the inputs are outside the bounding_box
"""
outside_index, all_out = self._outside(input_shape, inputs)
valid_index = np.atleast_1d(np.logical_not(outside_index)).nonzero()
if len(valid_index[0]) == 0:
all_out = True
return valid_index, all_out
def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]:
"""
        Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
valid_index, all_out = self._valid_index(input_shape, inputs)
valid_inputs = []
if not all_out:
for _input in inputs:
if input_shape:
valid_input = np.broadcast_to(np.atleast_1d(_input), input_shape)[valid_index]
if np.isscalar(_input):
valid_input = valid_input.item(0)
valid_inputs.append(valid_input)
else:
valid_inputs.append(_input)
return tuple(valid_inputs), valid_index, all_out
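    # Illustrative sketch of ``prepare_inputs``: out-of-bounds positions are
    # dropped so only in-bounds points are evaluated and the rest can be filled
    # with the fill value (NaN by default).
    #
    #     >>> import numpy as np
    #     >>> from astropy.modeling import models
    #     >>> m = models.Gaussian1D()
    #     >>> m.bounding_box = (-1, 1)
    #     >>> x = np.array([-5., 0., 5.])
    #     >>> valid, index, all_out = m.bounding_box.prepare_inputs(x.shape, [x])
    #     >>> valid[0], all_out                 # only x == 0. survives; all_out is False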
_BaseSelectorArgument = namedtuple('_BaseSelectorArgument', "index ignore")
class _SelectorArgument(_BaseSelectorArgument):
"""
Contains a single CompoundBoundingBox slicing input.
Parameters
----------
index : int
The index of the input in the input list
ignore : bool
Whether or not this input will be ignored by the bounding box.
Methods
-------
validate :
Returns a valid SelectorArgument for a given model.
get_selector :
Returns the value of the input for use in finding the correct
bounding_box.
get_fixed_value :
Gets the slicing value from a fix_inputs set of values.
"""
def __new__(cls, index, ignore):
self = super().__new__(cls, index, ignore)
return self
@classmethod
def validate(cls, model, argument, ignored: bool = True):
"""
Construct a valid selector argument for a CompoundBoundingBox.
Parameters
----------
model : `~astropy.modeling.Model`
            The model for which this will be an argument.
argument : int or str
A representation of which evaluation input to use
ignored : optional, bool
Whether or not to ignore this argument in the ModelBoundingBox.
Returns
-------
Validated selector_argument
"""
return cls(get_index(model, argument), ignored)
def get_selector(self, *inputs):
"""
Get the selector value corresponding to this argument
Parameters
----------
*inputs :
All the processed model evaluation inputs.
"""
_selector = inputs[self.index]
if isiterable(_selector):
if len(_selector) == 1:
return _selector[0]
else:
return tuple(_selector)
return _selector
def name(self, model) -> str:
"""
Get the name of the input described by this selector argument
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return get_name(model, self.index)
def pretty_repr(self, model):
"""
Get a pretty-print representation of this object
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return f"Argument(name='{self.name(model)}', ignore={self.ignore})"
def get_fixed_value(self, model, values: dict):
"""
        Gets the value of the fixed input corresponding to this argument
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
values : dict
Dictionary of fixed inputs.
"""
if self.index in values:
return values[self.index]
else:
if self.name(model) in values:
return values[self.name(model)]
else:
raise RuntimeError(f"{self.pretty_repr(model)} was not found in {values}")
def is_argument(self, model, argument) -> bool:
"""
Determine if passed argument is described by this selector argument
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
argument : int or str
A representation of which evaluation input is being used
"""
return self.index == get_index(model, argument)
def named_tuple(self, model):
"""
Get a tuple representation of this argument using the input
name from the model.
Parameters
----------
model : `~astropy.modeling.Model`
The Model this selector argument is for.
"""
return (self.name(model), self.ignore)
class _SelectorArguments(tuple):
"""
Contains the CompoundBoundingBox slicing description
Parameters
----------
input_ :
The SelectorArgument values
Methods
-------
validate :
Returns a valid SelectorArguments for its model.
get_selector :
Returns the selector a set of inputs corresponds to.
is_selector :
Determines if a selector is correctly formatted for this CompoundBoundingBox.
get_fixed_value :
Gets the selector from a fix_inputs set of values.
"""
_kept_ignore = None
def __new__(cls, input_: Tuple[_SelectorArgument], kept_ignore: List = None):
self = super().__new__(cls, input_)
if kept_ignore is None:
self._kept_ignore = []
else:
self._kept_ignore = kept_ignore
return self
def pretty_repr(self, model):
"""
Get a pretty-print representation of this object
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
"""
parts = ['SelectorArguments(']
for argument in self:
parts.append(
f" {argument.pretty_repr(model)}"
)
parts.append(')')
return '\n'.join(parts)
@property
def ignore(self):
"""Get the list of ignored inputs"""
ignore = [argument.index for argument in self if argument.ignore]
ignore.extend(self._kept_ignore)
return ignore
@property
def kept_ignore(self):
"""The arguments to persist in ignoring"""
return self._kept_ignore
@classmethod
def validate(cls, model, arguments, kept_ignore: List = None):
"""
Construct a valid Selector description for a CompoundBoundingBox.
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
arguments :
            The individual argument information
kept_ignore :
Arguments to persist as ignored
"""
inputs = []
for argument in arguments:
_input = _SelectorArgument.validate(model, *argument)
if _input.index in [this.index for this in inputs]:
raise ValueError(f"Input: '{get_name(model, _input.index)}' has been repeated.")
inputs.append(_input)
if len(inputs) == 0:
raise ValueError("There must be at least one selector argument.")
if isinstance(arguments, _SelectorArguments):
if kept_ignore is None:
kept_ignore = []
kept_ignore.extend(arguments.kept_ignore)
return cls(tuple(inputs), kept_ignore)
def get_selector(self, *inputs):
"""
Get the selector corresponding to these inputs
Parameters
----------
*inputs :
All the processed model evaluation inputs.
"""
return tuple(argument.get_selector(*inputs) for argument in self)
def is_selector(self, _selector):
"""
Determine if this is a reasonable selector
Parameters
----------
_selector : tuple
The selector to check
"""
return isinstance(_selector, tuple) and len(_selector) == len(self)
def get_fixed_values(self, model, values: dict):
"""
        Gets the values of the fixed inputs corresponding to these arguments
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
values : dict
Dictionary of fixed inputs.
"""
return tuple(argument.get_fixed_value(model, values) for argument in self)
def is_argument(self, model, argument) -> bool:
"""
Determine if passed argument is one of the selector arguments
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which evaluation input is being used
"""
for selector_arg in self:
if selector_arg.is_argument(model, argument):
return True
else:
return False
def selector_index(self, model, argument):
"""
Get the index of the argument passed in the selector tuples
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
for index, selector_arg in enumerate(self):
if selector_arg.is_argument(model, argument):
return index
else:
raise ValueError(f"{argument} does not correspond to any selector argument.")
def reduce(self, model, argument):
"""
Reduce the selector arguments by the argument given
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
arguments = list(self)
kept_ignore = [arguments.pop(self.selector_index(model, argument)).index]
kept_ignore.extend(self._kept_ignore)
return _SelectorArguments.validate(model, tuple(arguments), kept_ignore)
def add_ignore(self, model, argument):
"""
Add argument to the kept_ignore list
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
argument : int or str
A representation of which argument is being used
"""
if self.is_argument(model, argument):
raise ValueError(f"{argument}: is a selector argument and cannot be ignored.")
kept_ignore = [get_index(model, argument)]
return _SelectorArguments.validate(model, self, kept_ignore)
def named_tuple(self, model):
"""
Get a tuple of selector argument tuples using input names
Parameters
----------
model : `~astropy.modeling.Model`
The Model these selector arguments are for.
"""
return tuple(selector_arg.named_tuple(model) for selector_arg in self)
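    # Illustrative sketch (``model`` is a hypothetical model with inputs
    # ('x', 'slit_id')): ``validate`` turns a name/ignore description into
    # index-based ``_SelectorArgument`` entries, and ``get_selector`` extracts
    # the lookup key from a set of evaluation inputs.
    #
    #     >>> args = _SelectorArguments.validate(model, [('slit_id', True)])
    #     >>> args.get_selector(x_array, 1)     # -> (1,), the CompoundBoundingBox key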
class CompoundBoundingBox(_BoundingDomain):
"""
A model's compound bounding box
Parameters
----------
bounding_boxes : dict
A dictionary containing all the ModelBoundingBoxes that are possible
keys -> _selector (extracted from model inputs)
values -> ModelBoundingBox
model : `~astropy.modeling.Model`
The Model this compound bounding_box is for.
selector_args : _SelectorArguments
A description of how to extract the selectors from model inputs.
create_selector : optional
A method which takes in the selector and the model to return a
valid bounding corresponding to that selector. This can be used
to construct new bounding_boxes for previously undefined selectors.
These new boxes are then stored for future lookups.
order : optional, str
The ordering that is assumed for the tuple representation of the
bounding_boxes.
"""
def __init__(self, bounding_boxes: Dict[Any, ModelBoundingBox], model,
selector_args: _SelectorArguments, create_selector: Callable = None,
ignored: List[int] = None, order: str = 'C'):
super().__init__(model, ignored, order)
self._create_selector = create_selector
self._selector_args = _SelectorArguments.validate(model, selector_args)
self._bounding_boxes = {}
self._validate(bounding_boxes)
def copy(self):
bounding_boxes = {selector: bbox.copy(self.selector_args.ignore)
for selector, bbox in self._bounding_boxes.items()}
return CompoundBoundingBox(bounding_boxes, self._model,
selector_args=self._selector_args,
create_selector=copy.deepcopy(self._create_selector),
order=self._order)
def __repr__(self):
parts = ['CompoundBoundingBox(',
' bounding_boxes={']
# bounding_boxes
for _selector, bbox in self._bounding_boxes.items():
bbox_repr = bbox.__repr__().split('\n')
parts.append(f" {_selector} = {bbox_repr.pop(0)}")
for part in bbox_repr:
parts.append(f" {part}")
parts.append(' }')
# selector_args
selector_args_repr = self.selector_args.pretty_repr(self._model).split('\n')
parts.append(f" selector_args = {selector_args_repr.pop(0)}")
for part in selector_args_repr:
parts.append(f" {part}")
parts.append(')')
return '\n'.join(parts)
@property
def bounding_boxes(self) -> Dict[Any, ModelBoundingBox]:
return self._bounding_boxes
@property
def selector_args(self) -> _SelectorArguments:
return self._selector_args
@selector_args.setter
def selector_args(self, value):
self._selector_args = _SelectorArguments.validate(self._model, value)
warnings.warn("Overriding selector_args may cause problems you should re-validate "
"the compound bounding box before use!", RuntimeWarning)
@property
def named_selector_tuple(self) -> tuple:
return self._selector_args.named_tuple(self._model)
@property
def create_selector(self):
return self._create_selector
@staticmethod
def _get_selector_key(key):
if isiterable(key):
return tuple(key)
else:
return (key,)
def __setitem__(self, key, value):
_selector = self._get_selector_key(key)
if not self.selector_args.is_selector(_selector):
raise ValueError(f"{_selector} is not a selector!")
ignored = self.selector_args.ignore + self.ignored
self._bounding_boxes[_selector] = ModelBoundingBox.validate(self._model, value,
ignored,
order=self._order)
def _validate(self, bounding_boxes: dict):
for _selector, bounding_box in bounding_boxes.items():
self[_selector] = bounding_box
def __eq__(self, value):
if isinstance(value, CompoundBoundingBox):
return (self.bounding_boxes == value.bounding_boxes and
self.selector_args == value.selector_args and
self.create_selector == value.create_selector)
else:
return False
@classmethod
def validate(cls, model, bounding_box: dict, selector_args=None, create_selector=None,
ignored: list = None, order: str = 'C', _preserve_ignore: bool = False, **kwarg):
"""
Construct a valid compound bounding box for a model.
Parameters
----------
model : `~astropy.modeling.Model`
The model for which this will be a bounding_box
bounding_box : dict
            Dictionary of possible bounding_box representations
selector_args : optional
Description of the selector arguments
create_selector : optional, callable
Method for generating new selectors
order : optional, str
The order that a tuple representation will be assumed to be
Default: 'C'
"""
if isinstance(bounding_box, CompoundBoundingBox):
if selector_args is None:
selector_args = bounding_box.selector_args
if create_selector is None:
create_selector = bounding_box.create_selector
order = bounding_box.order
if _preserve_ignore:
ignored = bounding_box.ignored
bounding_box = bounding_box.bounding_boxes
if selector_args is None:
raise ValueError("Selector arguments must be provided "
"(can be passed as part of bounding_box argument)")
return cls(bounding_box, model, selector_args,
create_selector=create_selector, ignored=ignored, order=order)
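    # Illustrative usage sketch (``model`` here is a hypothetical model with
    # inputs ('x', 'slit_id')): one ordinary bounding box per selector value,
    # with the selector drawn from the 'slit_id' input and that input ignored
    # for interval checks.
    #
    #     >>> cbbox = CompoundBoundingBox.validate(
    #     ...     model,
    #     ...     {(1,): (-0.5, 1047.5), (2,): (-0.5, 2047.5)},
    #     ...     selector_args=[('slit_id', True)])
    #     >>> model.bounding_box = cbbox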
def __contains__(self, key):
return key in self._bounding_boxes
def _create_bounding_box(self, _selector):
self[_selector] = self._create_selector(_selector, model=self._model)
return self[_selector]
def __getitem__(self, key):
_selector = self._get_selector_key(key)
if _selector in self:
return self._bounding_boxes[_selector]
elif self._create_selector is not None:
return self._create_bounding_box(_selector)
else:
raise RuntimeError(f"No bounding box is defined for selector: {_selector}.")
def _select_bounding_box(self, inputs) -> ModelBoundingBox:
_selector = self.selector_args.get_selector(*inputs)
return self[_selector]
def prepare_inputs(self, input_shape, inputs) -> Tuple[Any, Any, Any]:
"""
        Prepare the inputs with respect to the bounding box.
Parameters
----------
input_shape : tuple
            The shape that all inputs have been reshaped/broadcast into
inputs : list
List of all the model inputs
Returns
-------
valid_inputs : list
The inputs reduced to just those inputs which are all inside
their respective bounding box intervals
valid_index : array_like
array of all indices inside the bounding box
all_out: bool
if all of the inputs are outside the bounding_box
"""
bounding_box = self._select_bounding_box(inputs)
return bounding_box.prepare_inputs(input_shape, inputs)
def _matching_bounding_boxes(self, argument, value) -> Dict[Any, ModelBoundingBox]:
selector_index = self.selector_args.selector_index(self._model, argument)
matching = {}
for selector_key, bbox in self._bounding_boxes.items():
if selector_key[selector_index] == value:
new_selector_key = list(selector_key)
new_selector_key.pop(selector_index)
if bbox.has_interval(argument):
new_bbox = bbox.fix_inputs(self._model, {argument: value},
_keep_ignored=True)
else:
new_bbox = bbox.copy()
matching[tuple(new_selector_key)] = new_bbox
if len(matching) == 0:
raise ValueError(f"Attempting to fix input {argument}, but there are no "
f"bounding boxes for argument value {value}.")
return matching
def _fix_input_selector_arg(self, argument, value):
matching_bounding_boxes = self._matching_bounding_boxes(argument, value)
if len(self.selector_args) == 1:
return matching_bounding_boxes[()]
else:
return CompoundBoundingBox(matching_bounding_boxes, self._model,
self.selector_args.reduce(self._model, argument))
def _fix_input_bbox_arg(self, argument, value):
bounding_boxes = {}
for selector_key, bbox in self._bounding_boxes.items():
bounding_boxes[selector_key] = bbox.fix_inputs(self._model, {argument: value},
_keep_ignored=True)
return CompoundBoundingBox(bounding_boxes, self._model,
self.selector_args.add_ignore(self._model, argument))
def fix_inputs(self, model, fixed_inputs: dict):
"""
Fix the bounding_box for a `fix_inputs` compound model.
Parameters
----------
model : `~astropy.modeling.Model`
The new model for which this will be a bounding_box
fixed_inputs : dict
Dictionary of inputs which have been fixed by this bounding box.
"""
fixed_input_keys = list(fixed_inputs.keys())
argument = fixed_input_keys.pop()
value = fixed_inputs[argument]
if self.selector_args.is_argument(self._model, argument):
bbox = self._fix_input_selector_arg(argument, value)
else:
bbox = self._fix_input_bbox_arg(argument, value)
if len(fixed_input_keys) > 0:
new_fixed_inputs = fixed_inputs.copy()
del new_fixed_inputs[argument]
bbox = bbox.fix_inputs(model, new_fixed_inputs)
if isinstance(bbox, CompoundBoundingBox):
selector_args = bbox.named_selector_tuple
bbox_dict = bbox
elif isinstance(bbox, ModelBoundingBox):
selector_args = None
bbox_dict = bbox.named_intervals
return bbox.__class__.validate(model, bbox_dict,
order=bbox.order, selector_args=selector_args)
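    # Illustrative sketch (hypothetical ``model``/``cbbox`` from the example
    # above, with ``fix_inputs`` imported from `astropy.modeling`): fixing the
    # selector input reduces the compound box to the ordinary box stored for
    # that selector value.
    #
    #     >>> sub_model = fix_inputs(model, {'slit_id': 1})
    #     >>> cbbox.fix_inputs(sub_model, {'slit_id': 1})   # ModelBoundingBox stored for (1,)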
|
c53be2a8b4e6b85d2da43b3804a73f67ad534dfdad3922db288c16345c306dee | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
# pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from .core import Fittable1DModel, Fittable2DModel
from .parameters import InputParameterError, Parameter
from .utils import ellipse_extent
__all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D',
'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D', 'Gaussian2D',
'Linear1D', 'Lorentz1D', 'RickerWavelet1D', 'RickerWavelet2D',
'RedshiftScaleFactor', 'Multiply', 'Planar2D', 'Scale',
'Sersic1D', 'Sersic2D', 'Shift',
'Sine1D', 'Cosine1D', 'Tangent1D',
'ArcSine1D', 'ArcCosine1D', 'ArcTangent1D',
'Trapezoid1D', 'TrapezoidDisk2D', 'Ring2D', 'Voigt1D',
'KingProjectedAnalytic1D', 'Exponential1D', 'Logarithmic1D']
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian - for a normalized profile
(integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi))
mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian.
stddev : float or `~astropy.units.Quantity`.
Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)).
Notes
-----
Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the Gaussian")
mean = Parameter(default=0, description="Position of peak (Gaussian)")
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None),
description="Standard deviation of the Gaussian")
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian1D(inputs=('x',))
order='C'
)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
return {self.inputs[0]: self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'mean': inputs_unit[self.inputs[0]],
'stddev': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
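    # Illustrative sketch of the normalization note in the class docstring:
    # with amplitude = 1 / (stddev * sqrt(2 * pi)) the profile has unit area.
    #
    #     >>> import numpy as np
    #     >>> from astropy.modeling.models import Gaussian1D
    #     >>> g = Gaussian1D(amplitude=1 / (2 * np.sqrt(2 * np.pi)), mean=0, stddev=2)
    #     >>> x = np.linspace(-30, 30, 60001)
    #     >>> float(g(x).sum() * (x[1] - x[0]))     # ~1.0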
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Amplitude (peak value) of the Gaussian.
x_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in x.
y_mean : float or `~astropy.units.Quantity`.
Mean of the Gaussian in y.
x_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or `~astropy.units.Quantity` or None.
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float or `~astropy.units.Quantity`, optional.
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise. Must be `None` if a covariance matrix
(``cov_matrix``) is provided. If no ``cov_matrix`` is given,
`None` means the default value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev``
must be provided consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(
\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}
\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1, description="Amplitude of the Gaussian")
x_mean = Parameter(default=0, description="Peak position (along x axis) of Gaussian")
y_mean = Parameter(default=0, description="Peak position (along y axis) of Gaussian")
x_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along x axis)")
y_stddev = Parameter(default=1, description="Standard deviation of the Gaussian (along y axis)")
theta = Parameter(default=0.0, description=("Rotation angle either as a "
"float (in radians) or a "
"|Quantity| angle (optional)"))
def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default,
y_mean=y_mean.default, x_stddev=None, y_stddev=None,
theta=None, cov_matrix=None, **kwargs):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError("Cannot specify both cov_matrix and "
"x/y_stddev/theta")
            # Compute the principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault('bounds', {})
kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None))
kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs)
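    # Illustrative sketch (made-up numbers): constructing the model from a
    # covariance matrix instead of (x_stddev, y_stddev, theta); the widths and
    # angle come out of the eigendecomposition above.
    #
    #     >>> import numpy as np
    #     >>> from astropy.modeling.models import Gaussian2D
    #     >>> cov = np.array([[2.0, 0.5], [0.5, 1.0]])
    #     >>> g = Gaussian2D(amplitude=1, x_mean=0, y_mean=0, cov_matrix=cov)
    #     >>> g.x_stddev, g.y_stddev, g.theta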
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-5.5, upper=5.5)
y: Interval(lower=-11.0, upper=11.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=-2.0, upper=2.0)
y: Interval(lower=-4.0, upper=4.0)
}
model=Gaussian2D(inputs=('x', 'y'))
order='C'
)
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
dx, dy = ellipse_extent(a, b, self.theta)
return ((self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx))
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
return {self.inputs[0]: self.x_mean.unit,
self.inputs[1]: self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_mean': inputs_unit[self.inputs[0]],
'y_mean': inputs_unit[self.inputs[0]],
'x_stddev': inputs_unit[self.inputs[0]],
'y_stddev': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
offset = Parameter(default=0, description="Offset to add to a model")
linear = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.offset.unit is None:
return None
return {self.inputs[0]: self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function"""
inv = self.copy()
inv.offset *= -1
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.offset) for x in self.bounding_box)
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function"""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model"""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter"""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'offset': outputs_unit[self.outputs[0]]}
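    # Illustrative sketch: the inverse undoes the shift, and when a bounding box
    # has been set its limits are translated by the same offset.
    #
    #     >>> from astropy.modeling.models import Shift
    #     >>> s = Shift(3)
    #     >>> s.inverse(s(2.5))                 # 2.5
    #     >>> s.bounding_box = (0, 10)
    #     >>> s.inverse.bounding_box            # limits shifted to roughly (3, 13)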
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
"""
factor = Parameter(default=1, description="Factor by which to scale a model")
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
_has_inverse_bounding_box = True
@property
def input_units(self):
if self.factor.unit is None:
return None
return {self.inputs[0]: self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function"""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.factor)
for x in self.bounding_box.bounding_box())
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function"""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'factor': outputs_unit[self.outputs[0]]}
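    # Illustrative sketch of the Scale/Multiply distinction: Scale treats its
    # factor as dimensionless (any unit on the factor is stripped), whereas
    # Multiply (next class) propagates the factor's unit into the output.
    #
    #     >>> from astropy import units as u
    #     >>> from astropy.modeling.models import Scale, Multiply
    #     >>> Scale(3)(2 * u.m)                 # 6 m
    #     >>> Multiply(3 * u.km)(2)             # 6 km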
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
factor = Parameter(default=1, description="Factor by which to multiply a model")
linear = True
fittable = True
_has_inverse_bounding_box = True
@property
def inverse(self):
"""One dimensional inverse multiply model function"""
inv = self.copy()
inv.factor = 1 / self.factor
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.factor)
for x in self.bounding_box.bounding_box())
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function"""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'factor': outputs_unit[self.outputs[0]]}
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
"""
z = Parameter(description='Redshift', default=0)
_has_inverse_bounding_box = True
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function"""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative"""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model"""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
try:
self.bounding_box
except NotImplementedError:
pass
else:
inv.bounding_box = tuple(self.evaluate(x, self.z)
for x in self.bounding_box.bounding_box())
return inv
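    # Illustrative sketch: scale a rest wavelength by (1 + z) and recover it
    # with the inverse.
    #
    #     >>> from astropy.modeling.models import RedshiftScaleFactor
    #     >>> rs = RedshiftScaleFactor(z=0.5)
    #     >>> rs(6563.)              # 9844.5
    #     >>> rs.inverse(rs(6563.))  # 6563.0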
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
return (amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)))
@property
def input_units(self):
if self.r_eff.unit is None:
return None
return {self.inputs[0]: self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'r_eff': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class _Trigonometric1D(Fittable1DModel):
"""
Base class for one dimensional trigonometric and inverse trigonometric models
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
"""
amplitude = Parameter(default=1, description="Oscillation amplitude")
frequency = Parameter(default=1, description="Oscillation frequency")
phase = Parameter(default=0, description="Oscillation phase")
@property
def input_units(self):
if self.frequency.unit is None:
return None
return {self.inputs[0]: 1. / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'frequency': inputs_unit[self.inputs[0]] ** -1,
'amplitude': outputs_unit[self.outputs[0]]}
class Sine1D(_Trigonometric1D):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (TWOPI * x * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
d_phase = (TWOPI * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Sine"""
return ArcSine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase)
class Cosine1D(_Trigonometric1D):
"""
One dimensional Cosine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Cosine1D
plt.figure()
s1 = Cosine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Cosine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.cos(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Cosine model derivative"""
d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase)
d_frequency = - (TWOPI * x * amplitude *
np.sin(TWOPI * frequency * x + TWOPI * phase))
d_phase = - (TWOPI * amplitude *
np.sin(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Cosine"""
return ArcCosine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase)
class Tangent1D(_Trigonometric1D):
"""
One dimensional Tangent model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Sine1D, Cosine1D, Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p)
Note that the tangent function is undefined for inputs of the form
    pi/2 + n*pi for all integers n. Thus the default bounding box
    has been restricted to:
    .. math:: [(-1/4 - p)/f, (1/4 - p)/f]
    which spans a single continuous branch of the tangent function.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Tangent1D
plt.figure()
s1 = Tangent1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Tangent model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.tan(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Tangent model derivative"""
sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase))**2
d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase)
d_frequency = TWOPI * x * amplitude * sec
d_phase = TWOPI * amplitude * sec
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of Tangent"""
return ArcTangent1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase)
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
"""
bbox = [(-1/4 - self.phase) / self.frequency, (1/4 - self.phase) / self.frequency]
if self.frequency.unit is not None:
bbox = bbox / self.frequency.unit
return bbox
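    # Illustrative sketch: with frequency=0.25 and phase=0 the default bounding
    # box is the single continuous branch described in the class notes.
    #
    #     >>> from astropy.modeling.models import Tangent1D
    #     >>> t = Tangent1D(amplitude=1, frequency=0.25, phase=0)
    #     >>> t.bounding_box.bounding_box()     # the (-1.0, 1.0) branch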
class _InverseTrigonometric1D(_Trigonometric1D):
"""
Base class for one dimensional inverse trigonometric models
"""
@property
def input_units(self):
if self.amplitude.unit is None:
return None
return {self.inputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'frequency': outputs_unit[self.outputs[0]] ** -1,
'amplitude': inputs_unit[self.inputs[0]]}
class ArcSine1D(_InverseTrigonometric1D):
"""
One dimensional ArcSine model returning values between -pi/2 and pi/2
only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Sine
frequency : float
Oscillation frequency for corresponding Sine
phase : float
Oscillation phase for corresponding Sine
See Also
--------
Sine1D, ArcCosine1D, ArcTangent1D
Notes
-----
Model formula:
    .. math:: f(x) = \\frac{\\arcsin(x / A) / (2 \\pi) - p}{f}
The arcsin function being used for this model will only accept inputs
in [-A, A]; otherwise, a runtime warning will be thrown and the result
will be NaN. To avoid this, the bounding_box has been properly set to
accommodate this; therefore, it is recommended that this model always
be evaluated with the ``with_bounding_box=True`` option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcSine1D
plt.figure()
s1 = ArcSine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcSine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_sine = np.arcsin(argument) / TWOPI
return (arc_sine - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcSine model derivative"""
d_amplitude = - x / (TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude)**2))
d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2
d_phase = - 1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcSine"""
return Sine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase)
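    # Illustrative sketch of the recommendation in the class notes: evaluating
    # with the bounding box enabled returns NaN for out-of-range inputs instead
    # of propagating invalid values from np.arcsin.
    #
    #     >>> import numpy as np
    #     >>> from astropy.modeling.models import ArcSine1D
    #     >>> a = ArcSine1D(amplitude=1, frequency=0.25, phase=0)
    #     >>> a(np.array([-2., 0., 2.]), with_bounding_box=True)   # [nan, 0., nan]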
class ArcCosine1D(_InverseTrigonometric1D):
"""
One dimensional ArcCosine returning values between 0 and pi only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Cosine
frequency : float
Oscillation frequency for corresponding Cosine
phase : float
Oscillation phase for corresponding Cosine
See Also
--------
Cosine1D, ArcSine1D, ArcTangent1D
Notes
-----
Model formula:
    .. math:: f(x) = \\frac{\\arccos(x / A) / (2 \\pi) - p}{f}
The arccos function being used for this model will only accept inputs
in [-A, A]; otherwise, a runtime warning will be thrown and the result
will be NaN. To avoid this, the bounding_box has been properly set to
accommodate this; therefore, it is recommended that this model always
be evaluated with the ``with_bounding_box=True`` option.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcCosine1D
plt.figure()
s1 = ArcCosine1D(amplitude=1, frequency=.25)
r=np.arange(-1, 1, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-1, 1, 0, np.pi])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
arc_cos = np.arccos(argument) / TWOPI
return (arc_cos - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcCosine model derivative"""
d_amplitude = x / (TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude)**2))
d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2
d_phase = - 1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
"""
return -1 * self.amplitude, 1 * self.amplitude
@property
def inverse(self):
"""One dimensional inverse of ArcCosine"""
return Cosine1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase)
class ArcTangent1D(_InverseTrigonometric1D):
"""
One dimensional ArcTangent model returning values between -pi/2 and
pi/2 only.
Parameters
----------
amplitude : float
Oscillation amplitude for corresponding Tangent
frequency : float
Oscillation frequency for corresponding Tangent
phase : float
Oscillation phase for corresponding Tangent
See Also
--------
Tangent1D, ArcSine1D, ArcCosine1D
Notes
-----
Model formula:
    .. math:: f(x) = \\frac{\\arctan(x / A) / (2 \\pi) - p}{f}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import ArcTangent1D
plt.figure()
s1 = ArcTangent1D(amplitude=1, frequency=.25)
r=np.arange(-10, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([-10, 10, -np.pi/2, np.pi/2])
plt.show()
"""
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = x / amplitude
if isinstance(argument, Quantity):
argument = argument.value
        arc_tan = np.arctan(argument) / TWOPI
        return (arc_tan - phase) / frequency
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional ArcTangent model derivative"""
d_amplitude = - x / (TWOPI * frequency * amplitude**2 * (1 + (x / amplitude)**2))
d_frequency = (phase - (np.arctan(x / amplitude) / TWOPI)) / frequency**2
d_phase = - 1 / frequency * np.ones(x.shape)
return [d_amplitude, d_frequency, d_phase]
@property
def inverse(self):
"""One dimensional inverse of ArcTangent"""
return Tangent1D(amplitude=self.amplitude, frequency=self.frequency, phase=self.phase)
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
"""
slope = Parameter(default=1, description="Slope of the straight line")
intercept = Parameter(default=0, description="Intercept of the straight line")
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function"""
return slope * x + intercept
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Line model derivative with respect to parameters"""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope ** -1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
return {self.inputs[0]: self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'intercept': outputs_unit[self.outputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]]}
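    # Illustrative sketch: the analytic inverse recovers the input.
    #
    #     >>> from astropy.modeling.models import Linear1D
    #     >>> line = Linear1D(slope=2, intercept=1)
    #     >>> line(3.0)             # 7.0
    #     >>> line.inverse(7.0)     # 3.0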
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the plane in X
slope_y : float
Slope of the plane in Y
intercept : float
Z-intercept of the plane
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
"""
slope_x = Parameter(default=1, description="Slope of the plane in X")
slope_y = Parameter(default=1, description="Slope of the plane in Y")
intercept = Parameter(default=0, description="Z-intercept of the plane")
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function"""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, *params):
"""Two dimensional Plane model derivative with respect to parameters"""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'intercept': outputs_unit['z'],
'slope_x': outputs_unit['z'] / inputs_unit['x'],
'slope_y': outputs_unit['z'] / inputs_unit['y']}
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float or `~astropy.units.Quantity`.
Peak value - for a normalized profile (integrating to 1),
set amplitude = 2 / (np.pi * fwhm)
x_0 : float or `~astropy.units.Quantity`.
Position of the peak
fwhm : float or `~astropy.units.Quantity`.
Full width at half maximum (FWHM)
See Also
--------
Gaussian1D, Box1D, RickerWavelet1D
Notes
-----
Either all or none of input ``x``, position ``x_0`` and ``fwhm`` must be provided
consistently with compatible units or as unitless numbers.
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
where :math:`\\gamma` is half of given FWHM.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Peak value")
x_0 = Parameter(default=0, description="Position of the peak")
fwhm = Parameter(default=1, description="Full width at half maximum")
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function"""
return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 +
(fwhm / 2.) ** 2))
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters"""
d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2)
d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) /
(fwhm ** 2 + (x - x_0) ** 2))
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'fwhm': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
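# Editorial usage sketch (illustrative addition, not part of this module's
# public API): by construction a Lorentz1D profile falls to half of its peak
# value one half-width (fwhm / 2) away from the center x_0.
def _lorentz1d_half_maximum_sketch():
    model = Lorentz1D(amplitude=2.0, x_0=1.0, fwhm=0.5)
    # x_0 + fwhm / 2 = 1.25; the profile there equals amplitude / 2 = 1.0
    return np.isclose(model(1.25), 1.0)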
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float or `~astropy.units.Quantity`
Position of the peak
amplitude_L : float or `~astropy.units.Quantity`.
The Lorentzian amplitude (peak of the associated Lorentz function)
- for a normalized profile (integrating to 1), set
amplitude_L = 2 / (np.pi * fwhm_L)
fwhm_L : float or `~astropy.units.Quantity`
The Lorentzian full width at half maximum
fwhm_G : float or `~astropy.units.Quantity`.
The Gaussian full width at half maximum
method : str, optional
Algorithm for computing the complex error function; one of
'Humlicek2' (default; fast and generally accurate to better than ``rtol=3.e-5``) or
'Scipy' (alias 'wofz'; requires ``scipy``, almost as fast and serves as the
accuracy reference).
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Either all of the inputs ``x``, position ``x_0`` and the ``fwhm_*`` widths must be
provided with compatible units, or all must be unitless numbers.
The Voigt profile is calculated as the real part of the complex error function,
computed either from Humlicek's rational approximations (JQSRT 21:309, 1979;
27:437, 1982) following Schreier 2018 (MNRAS 479, 3068; ``hum2zpf16m`` from his
cpfX.py module), or from `~scipy.special.wofz` (implementing 'Faddeeva.cc').
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0,
description="Position of the peak")
amplitude_L = Parameter(default=1, # noqa: N815
description="The Lorentzian amplitude")
fwhm_L = Parameter(default=2/np.pi, # noqa: N815
description="The Lorentzian full width at half maximum")
fwhm_G = Parameter(default=np.log(2), # noqa: N815
description="The Gaussian full width at half maximum")
sqrt_pi = np.sqrt(np.pi)
sqrt_ln2 = np.sqrt(np.log(2))
sqrt_ln2pi = np.sqrt(np.log(2) * np.pi)
_last_z = np.zeros(1, dtype=complex)
_last_w = np.zeros(1, dtype=float)
_faddeeva = None
def __init__(self, x_0=x_0.default, amplitude_L=amplitude_L.default, # noqa: N803
fwhm_L=fwhm_L.default, fwhm_G=fwhm_G.default, method='humlicek2', # noqa: N803
**kwargs):
if str(method).lower() in ('wofz', 'scipy'):
from scipy.special import wofz
self._faddeeva = wofz
elif str(method).lower() == 'humlicek2':
self._faddeeva = self._hum2zpf16c
else:
raise ValueError(f'Not a valid method for Voigt1D Faddeeva function: {method}.')
self.method = self._faddeeva.__name__
super().__init__(x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs)
def _wrap_wofz(self, z):
"""Call complex error (Faddeeva) function w(z) implemented by algorithm `method`;
cache results for consecutive calls from `evaluate`, `fit_deriv`."""
if (z.shape == self._last_z.shape and
np.allclose(z, self._last_z, rtol=1.e-14, atol=1.e-15)):
return self._last_w
self._last_w = self._faddeeva(z)
self._last_z = z
return self._last_w
def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803
"""One dimensional Voigt function scaled to Lorentz peak amplitude."""
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G
# The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ;
# for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L
return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L
def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): # noqa: N803
"""Derivative of the one dimensional Voigt function with respect to parameters."""
s = self.sqrt_ln2 / fwhm_G
z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s
# V * constant from McLean implementation (== their Voigt function)
w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi
# Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L)
dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L
return [-dwdz.real * 2 * s,
w.real / amplitude_L,
w.real / fwhm_L - dwdz.imag * s,
(-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'fwhm_L': inputs_unit[self.inputs[0]],
'fwhm_G': inputs_unit[self.inputs[0]],
'amplitude_L': outputs_unit[self.outputs[0]]}
@staticmethod
def _hum2zpf16c(z, s=10.0):
"""Complex error function w(z) for z = x + iy combining Humlicek's rational approximations:
|x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II;
else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35
Version using a mask and np.place;
single complex argument version of Franz Schreier's cpfX.hum2zpf16m.
Originally licensed under a 3-clause BSD style license - see
https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py
"""
# Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35
AA = np.array([+46236.3358828121, -147726.58393079657j, # noqa: N806
-206562.80451354137, 281369.1590631087j,
+183092.74968253175, -184787.96830696272j,
-66155.39578477248, 57778.05827983565j,
+11682.770904216826, -9442.402767960672j,
-1052.8438624933142, 814.0996198624186j,
+45.94499030751872, -34.59751573708725j,
-0.7616559377907136, 0.5641895835476449j]) # 1j/sqrt(pi) to the 12. digit
bb = np.array([+7918.06640624997, 0.0,
-126689.0625, 0.0,
+295607.8125, 0.0,
-236486.25, 0.0,
+84459.375, 0.0,
-15015.0, 0.0,
+1365.0, 0.0,
-60.0, 0.0,
+1.0])
sqrt_piinv = 1.0 / np.sqrt(np.pi)
zz = z * z
w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz*(zz - 3.0))
if np.any(z.imag < s):
mask = abs(z.real) + z.imag < s # returns true for interior points
# returns small complex array covering only the interior region
Z = z[np.where(mask)] + 1.35j
ZZ = Z * Z
numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z +
AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z +
AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0])
denom = (((((((ZZ + bb[14])*ZZ + bb[12])*ZZ + bb[10])*ZZ+bb[8])*ZZ + bb[6])*ZZ +
bb[4])*ZZ + bb[2])*ZZ + bb[0]
np.place(w, mask, numer / denom)
return w
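# Editorial usage sketch (illustrative addition, not part of this module's
# public API): compare the default Humlicek approximation against the
# scipy-backed 'wofz' method when scipy is importable; per the docstring
# above, the two should agree to roughly the 3e-5 relative level or better.
def _voigt1d_method_comparison_sketch():
    x = np.linspace(-5.0, 5.0, 101)
    humlicek = Voigt1D(x_0=0.0, amplitude_L=1.0, fwhm_L=1.0, fwhm_G=1.0)(x)
    try:
        wofz = Voigt1D(x_0=0.0, amplitude_L=1.0, fwhm_L=1.0, fwhm_G=1.0,
                       method='wofz')(x)
    except ImportError:
        return None   # scipy not installed; nothing to compare against
    return np.max(np.abs(humlicek - wofz) / wofz)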
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the constant function")
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters"""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'amplitude': outputs_unit[self.outputs[0]]}
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter(default=1, description="Value of the constant function")
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'amplitude': outputs_unit[self.outputs[0]]}
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red',
facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1, description="Value of the ellipse")
x_0 = Parameter(default=0, description="X position of the center of the disk.")
y_0 = Parameter(default=0, description="Y position of the center of the disk.")
a = Parameter(default=1, description="The length of the semimajor axis")
b = Parameter(default=1, description="The length of the semiminor axis")
theta = Parameter(default=0.0, description=("Rotation angle either as a "
"float (in radians) or a "
"|Quantity| angle"))
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.)
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'a': inputs_unit[self.inputs[0]],
'b': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
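# Editorial usage sketch (illustrative addition, not part of this module's
# public API): rasterizing an ellipse onto a pixel grid gives a filled region
# whose pixel count approximates the analytic area pi * a * b.
def _ellipse2d_area_sketch():
    model = Ellipse2D(amplitude=1.0, x_0=25.0, y_0=25.0, a=10.0, b=5.0,
                      theta=np.pi / 6)
    y, x = np.mgrid[0:51, 0:51]
    image = model(x, y)
    return image.sum(), np.pi * 10.0 * 5.0   # pixel count vs. analytic area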
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
"""
amplitude = Parameter(default=1, description="Value of disk function")
x_0 = Parameter(default=0, description="X position of center of the disk")
y_0 = Parameter(default=0, description="Y position of center of the disk")
R_0 = Parameter(default=1, description="Radius of the disk")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function"""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0 ** 2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return ((self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'R_0': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
"""
amplitude = Parameter(default=1, description="Value of the disk function")
x_0 = Parameter(default=0, description="X position of center of disc")
y_0 = Parameter(default=0, description="Y position of center of disc")
r_in = Parameter(default=1, description="Inner radius of the ring")
width = Parameter(default=1, description="Width of the ring")
def __init__(self, amplitude=amplitude.default, x_0=x_0.default,
y_0=y_0.default, r_in=None, width=None,
r_out=None, **kwargs):
if (r_in is None) and (r_out is None) and (width is None):
r_in = self.r_in.default
width = self.width.default
elif (r_in is not None) and (r_out is None) and (width is None):
width = self.width.default
elif (r_in is None) and (r_out is not None) and (width is None):
r_in = self.r_in.default
width = r_out - r_in
elif (r_in is None) and (r_out is None) and (width is not None):
r_in = self.r_in.default
elif (r_in is not None) and (r_out is not None) and (width is None):
width = r_out - r_in
elif (r_in is None) and (r_out is not None) and (width is not None):
r_in = r_out - width
elif (r_in is not None) and (r_out is not None) and (width is not None):
if np.any(width != (r_out - r_in)):
raise InputParameterError("Width must be r_out - r_in")
if np.any(r_in < 0) or np.any(width < 0):
raise InputParameterError(f"{r_in=} and {width=} must both be >=0")
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width,
**kwargs)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'r_in': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
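# Editorial usage sketch (illustrative addition, not part of this module's
# public API): Ring2D accepts either ``width`` or ``r_out``; the two calls
# below describe the same annulus and end up with identical parameters.
def _ring2d_equivalent_constructions_sketch():
    by_width = Ring2D(amplitude=1.0, x_0=0.0, y_0=0.0, r_in=2.0, width=1.0)
    by_r_out = Ring2D(amplitude=1.0, x_0=0.0, y_0=0.0, r_in=2.0, r_out=3.0)
    return np.allclose(by_width.parameters, by_r_out.parameters)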
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude A")
x_0 = Parameter(default=0, description="Position of center of box function")
width = Parameter(default=1, description="Width of the box")
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function"""
inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.)
return np.select([inside], [amplitude], 0)
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
"""
amplitude = Parameter(default=1, description="Amplitude")
x_0 = Parameter(default=0, description="X position of the center of the box function")
y_0 = Parameter(default=0, description="Y position of the center of the box function")
x_width = Parameter(default=1, description="Width in x direction of the box")
y_width = Parameter(default=1, description="Width in y direction of the box")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function"""
x_range = np.logical_and(x >= x_0 - x_width / 2.,
x <= x_0 + x_width / 2.)
y_range = np.logical_and(y >= y_0 - y_width / 2.,
y <= y_0 + y_width / 2.)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[1]],
'x_width': inputs_unit[self.inputs[0]],
'y_width': inputs_unit[self.inputs[1]],
'amplitude': outputs_unit[self.outputs[0]]}
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="Center position of the trapezoid")
width = Parameter(default=1, description="Width of constant part of the trapezoid")
slope = Parameter(default=1, description="Slope of the tails of trapezoid")
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function"""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.
x3 = x_0 + width / 2.
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'width': inputs_unit[self.inputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
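# Editorial usage sketch (illustrative addition, not part of this module's
# public API): with amplitude=1, width=2 and slope=1 the trapezoid ramps up
# over [-2, -1], stays flat on [-1, 1] and ramps back down to zero at x = 2.
def _trapezoid1d_breakpoints_sketch():
    model = Trapezoid1D(amplitude=1.0, x_0=0.0, width=2.0, slope=1.0)
    x = np.array([-2.5, -1.5, 0.0, 1.5, 2.5])
    return model(x)   # expected: approximately [0.0, 0.5, 1.0, 0.5, 0.0]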
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
"""
amplitude = Parameter(default=1, description="Amplitude of the trapezoid")
x_0 = Parameter(default=0, description="X position of the center of the trapezoid")
y_0 = Parameter(default=0, description="Y position of the center of the trapezoid")
R_0 = Parameter(default=1, description="Radius of constant part of trapezoid")
slope = Parameter(default=1, description="Slope of tails of trapezoid in x direction")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function"""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'R_0': inputs_unit[self.inputs[0]],
'slope': outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class RickerWavelet1D(Fittable1DModel):
"""
One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import RickerWavelet1D
plt.figure()
s1 = RickerWavelet1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="Position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Ricker Wavelet model function"""
xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'sigma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class RickerWavelet2D(Fittable2DModel):
"""
Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat"
model).
.. note::
See https://github.com/astropy/astropy/pull/9445 for discussions
related to renaming of this model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Ricker wavelet
See Also
--------
RickerWavelet1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
"""
amplitude = Parameter(default=1, description="Amplitude (peak) value")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
sigma = Parameter(default=1, description="Width of the Ricker wavelet")
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Ricker Wavelet model function"""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2)
return amplitude * (1 - rr_ww) * np.exp(- rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'sigma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[
\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}
\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the Airy function")
x_0 = Parameter(default=0, description="X position of the peak")
y_0 = Parameter(default=0, description="Y position of the peak")
radius = Parameter(default=1,
description="The radius of the Airy disk (radius of first zero crossing)")
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function"""
if cls._rz is None:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'radius': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
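# Editorial usage sketch (illustrative addition, not part of this module's
# public API; requires scipy for the Bessel function): the Airy pattern peaks
# at ``amplitude`` in the center and reaches its first zero at ``radius``.
def _airydisk2d_first_zero_sketch():
    model = AiryDisk2D(amplitude=5.0, x_0=0.0, y_0=0.0, radius=2.0)
    x = np.array([0.0, 2.0])
    y = np.zeros_like(x)
    return model(x, y)   # expected: approximately [5.0, 0.0]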
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude of the model")
x_0 = Parameter(default=0, description="X position of maximum of Moffat model")
gamma = Parameter(default=1, description="Core width of Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function"""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters"""
fac = (1 + (x - x_0) ** 2 / gamma ** 2)
d_A = fac ** (-alpha)
d_x_0 = (2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma ** 2))
d_gamma = (2 * amplitude * alpha * (x - x_0) ** 2 * d_A /
(fac * gamma ** 3))
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'gamma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
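# Editorial usage sketch (illustrative addition, not part of this module's
# public API): the ``fwhm`` property should be consistent with the profile
# itself, i.e. the model falls to half its peak half a FWHM from the center.
def _moffat1d_fwhm_consistency_sketch():
    model = Moffat1D(amplitude=4.0, x_0=0.0, gamma=2.0, alpha=3.5)
    half_point = model(model.fwhm / 2)
    return np.isclose(half_point, 2.0)   # half of the peak amplitude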
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
"""
amplitude = Parameter(default=1, description="Amplitude (peak value) of the model")
x_0 = Parameter(default=0, description="X position of the maximum of the Moffat model")
y_0 = Parameter(default=0, description="Y position of the maximum of the Moffat model")
gamma = Parameter(default=1, description="Core width of the Moffat model")
alpha = Parameter(default=1, description="Power index of the Moffat model")
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach
<https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = (2 * amplitude * alpha * d_A * (x - x_0) /
(gamma ** 2 * (1 + rr_gg)))
d_y_0 = (2 * amplitude * alpha * d_A * (y - y_0) /
(gamma ** 2 * (1 + rr_gg)))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = (2 * amplitude * alpha * d_A * rr_gg /
(gamma * (1 + rr_gg)))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'gamma': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float or `~astropy.units.Quantity`, optional
The rotation angle as an angular quantity
(`~astropy.units.Quantity` or `~astropy.coordinates.Angle`)
or a value in radians (as a float). The rotation angle
increases counterclockwise from the positive x axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{
-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]
\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (2n,b_n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2], update_ticks=True)
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1, description="Surface brightness at r_eff")
r_eff = Parameter(default=1, description="Effective (half-light) radius")
n = Parameter(default=4, description="Sersic Index")
x_0 = Parameter(default=0, description="X position of the center")
y_0 = Parameter(default=0, description="Y position of the center")
ellip = Parameter(default=0, description="Ellipticity")
theta = Parameter(default=0.0, description=("Rotation angle either as a "
"float (in radians) or a "
"|Quantity| angle"))
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
bn = cls._gammaincinv(2. * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit,
self.inputs[1]: self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return {'x_0': inputs_unit[self.inputs[0]],
'y_0': inputs_unit[self.inputs[0]],
'r_eff': inputs_unit[self.inputs[0]],
'theta': u.rad,
'amplitude': outputs_unit[self.outputs[0]]}
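# Editorial usage sketch (illustrative addition, not part of this module's
# public API; requires scipy for ``gammaincinv``): by construction the surface
# brightness equals ``amplitude`` at the effective radius along the major axis.
def _sersic2d_amplitude_at_r_eff_sketch():
    model = Sersic2D(amplitude=3.0, r_eff=10.0, n=4.0, x_0=0.0, y_0=0.0,
                     ellip=0.3, theta=0.0)
    return np.isclose(model(10.0, 0.0), 3.0)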
class KingProjectedAnalytic1D(Fittable1DModel):
"""
Projected (surface density) analytic King Model.
Parameters
----------
amplitude : float
Amplitude or scaling factor.
r_core : float
Core radius (f(r_c) ~ 0.5 f_0)
r_tide : float
Tidal radius.
Notes
-----
This model approximates a King model with an analytic function. The derivation of this
equation can be found in King '62 (equation 14). This is just an approximation of the
full model and the parameters derived from this model should be taken with caution.
It usually works for models with a concentration parameter c = log10(r_t/r_c) smaller than 2.
Model formula:
.. math::
f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} -
\\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import KingProjectedAnalytic1D
import matplotlib.pyplot as plt
plt.figure()
rt_list = [1, 2, 5, 10, 20]
for rt in rt_list:
r = np.linspace(0.1, rt, 100)
mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt)
sig = mod(r)
plt.loglog(r, sig/sig[0], label=f"c ~ {mod.concentration:0.2f}")
plt.xlabel("r")
plt.ylabel(r"$\\sigma/\\sigma_0$")
plt.legend()
plt.show()
References
----------
.. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K
"""
amplitude = Parameter(default=1, bounds=(FLOAT_EPSILON, None),
description="Amplitude or scaling factor")
r_core = Parameter(default=1, bounds=(FLOAT_EPSILON, None),
description="Core Radius")
r_tide = Parameter(default=2, bounds=(FLOAT_EPSILON, None),
description="Tidal Radius")
@property
def concentration(self):
"""Concentration parameter of the king model"""
return np.log10(np.abs(self.r_tide/self.r_core))
@staticmethod
def evaluate(x, amplitude, r_core, r_tide):
"""
Analytic King model function.
"""
result = amplitude * r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) -
1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
result[bounds] = result[bounds] * 0.
return result
@staticmethod
def fit_deriv(x, amplitude, r_core, r_tide):
"""
Analytic King model function derivatives.
"""
d_amplitude = r_core ** 2 * (1/np.sqrt(x ** 2 + r_core ** 2) -
1/np.sqrt(r_tide ** 2 + r_core ** 2)) ** 2
d_r_core = 2 * amplitude * r_core ** 2 * (r_core/(r_core ** 2 + r_tide ** 2) ** (3/2) -
r_core/(r_core ** 2 + x ** 2) ** (3/2)) * \
(1./np.sqrt(r_core ** 2 + x ** 2) - 1./np.sqrt(r_core ** 2 + r_tide ** 2)) + \
2 * amplitude * r_core * (1./np.sqrt(r_core ** 2 + x ** 2) -
1./np.sqrt(r_core ** 2 + r_tide ** 2)) ** 2
d_r_tide = (2 * amplitude * r_core ** 2 * r_tide *
(1./np.sqrt(r_core ** 2 + x ** 2) -
1./np.sqrt(r_core ** 2 + r_tide ** 2)))/(r_core ** 2 + r_tide ** 2) ** (3/2)
# Set invalid r values to 0
bounds = (x >= r_tide) | (x < 0)
d_amplitude[bounds] = d_amplitude[bounds]*0
d_r_core[bounds] = d_r_core[bounds]*0
d_r_tide[bounds] = d_r_tide[bounds]*0
return [d_amplitude, d_r_core, d_r_tide]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
The model is not defined for r > r_tide.
``(r_low, r_high)``
"""
return (0 * self.r_tide, 1 * self.r_tide)
@property
def input_units(self):
if self.r_core.unit is None:
return None
return {self.inputs[0]: self.r_core.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'r_core': inputs_unit[self.inputs[0]],
'r_tide': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
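# Editorial usage sketch (illustrative addition, not part of this module's
# public API): the projected King profile is truncated beyond the tidal radius
# and its ``concentration`` is log10(r_tide / r_core).
def _king_model_truncation_sketch():
    model = KingProjectedAnalytic1D(amplitude=1.0, r_core=1.0, r_tide=10.0)
    inside, outside = model(np.array([5.0, 12.0]))
    return model.concentration, inside > 0.0, outside == 0.0   # 1.0, True, True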
class Logarithmic1D(Fittable1DModel):
"""
One dimensional logarithmic model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Exponential1D, Gaussian1D
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.log(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
d_amplitude = np.log(x / tau)
d_tau = np.zeros(x.shape) - (amplitude / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Exponential1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'tau': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Exponential1D(Fittable1DModel):
"""
One dimensional exponential model.
Parameters
----------
amplitude : float, optional
tau : float, optional
See Also
--------
Logarithmic1D, Gaussian1D
"""
amplitude = Parameter(default=1)
tau = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, tau):
return amplitude * np.exp(x / tau)
@staticmethod
def fit_deriv(x, amplitude, tau):
''' Derivative with respect to parameters'''
d_amplitude = np.exp(x / tau)
d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
return [d_amplitude, d_tau]
@property
def inverse(self):
new_amplitude = self.tau
new_tau = self.amplitude
return Logarithmic1D(amplitude=new_amplitude, tau=new_tau)
@tau.validator
def tau(self, val):
''' tau cannot be 0'''
if np.all(val == 0):
raise ValueError("0 is not an allowed value for tau")
@property
def input_units(self):
if self.tau.unit is None:
return None
return {self.inputs[0]: self.tau.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'tau': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
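# Editorial usage sketch (illustrative addition, not part of this module's
# public API): Exponential1D and Logarithmic1D are analytic inverses of each
# other, so composing a model with its ``inverse`` recovers the input.
def _exponential_logarithmic_roundtrip_sketch():
    model = Exponential1D(amplitude=2.0, tau=3.0)   # y = 2 exp(x / 3)
    x = np.linspace(0.5, 5.0, 10)
    return np.allclose(model.inverse(model(x)), x)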
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
"""
Implements projections--particularly sky projections defined in WCS Paper II
[1]_.
All angles are set and displayed in degrees, but internal computations are
performed in radians. All functions expect inputs and outputs in degrees.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import abc
from itertools import chain, product
import numpy as np
from astropy import units as u
from astropy import wcs
from .core import Model
from .parameters import InputParameterError, Parameter
from .utils import _to_orig_unit, _to_radian
# List of tuples of the form
# (long class name without suffix, short WCSLIB projection code):
_PROJ_NAME_CODE = [
('ZenithalPerspective', 'AZP'),
('SlantZenithalPerspective', 'SZP'),
('Gnomonic', 'TAN'),
('Stereographic', 'STG'),
('SlantOrthographic', 'SIN'),
('ZenithalEquidistant', 'ARC'),
('ZenithalEqualArea', 'ZEA'),
('Airy', 'AIR'),
('CylindricalPerspective', 'CYP'),
('CylindricalEqualArea', 'CEA'),
('PlateCarree', 'CAR'),
('Mercator', 'MER'),
('SansonFlamsteed', 'SFL'),
('Parabolic', 'PAR'),
('Molleweide', 'MOL'),
('HammerAitoff', 'AIT'),
('ConicPerspective', 'COP'),
('ConicEqualArea', 'COE'),
('ConicEquidistant', 'COD'),
('ConicOrthomorphic', 'COO'),
('BonneEqualArea', 'BON'),
('Polyconic', 'PCO'),
('TangentialSphericalCube', 'TSC'),
('COBEQuadSphericalCube', 'CSC'),
('QuadSphericalCube', 'QSC'),
('HEALPix', 'HPX'),
('HEALPixPolar', 'XPH'),
]
_NOT_SUPPORTED_PROJ_CODES = ['ZPN']
_PROJ_NAME_CODE_MAP = dict(_PROJ_NAME_CODE)
projcodes = [code for _, code in _PROJ_NAME_CODE]
__all__ = [
'Projection', 'Pix2SkyProjection', 'Sky2PixProjection', 'Zenithal',
'Cylindrical', 'PseudoCylindrical', 'Conic', 'PseudoConic', 'QuadCube',
'HEALPix', 'AffineTransformation2D', 'projcodes'
] + list(map('_'.join, product(['Pix2Sky', 'Sky2Pix'], chain(*_PROJ_NAME_CODE))))
class _ParameterDS(Parameter):
"""
Same as `Parameter` but can indicate its modified status via the ``dirty``
property. This flag also gets set automatically when a parameter is
modified.
This ability to track a parameter's modified status is needed so that
WCSLIB's prjprm structure (whose update may be a more time-consuming
operation) is refreshed automatically *only as required*.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dirty = True
def validate(self, value):
super().validate(value)
self.dirty = True
class Projection(Model):
"""Base class for all sky projections."""
# Radius of the generating sphere.
# This sets the circumference to 360 deg so that arc length is measured in deg.
r0 = 180 * u.deg / np.pi
_separable = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj = wcs.Prjprm()
@property
@abc.abstractmethod
def inverse(self):
"""
Inverse projection--all projection models must provide an inverse.
"""
@property
def prjprm(self):
""" WCSLIB ``prjprm`` structure. """
self._update_prj()
return self._prj
def _update_prj(self):
"""
A default updater for projection's pv.
.. warning::
This method assumes that PV0 is never modified. If a projection
that uses PV0 is ever implemented in this module, that projection
class should override this method.
.. warning::
This method assumes that the order in which PVi values (i>0)
are to be assigned is identical to the order of model parameters
in ``param_names``. That is, pv[1] = model.parameters[0], ...
"""
if not self.param_names:
return
pv = []
dirty = False
for p in self.param_names:
param = getattr(self, p)
pv.append(float(param.value))
dirty |= param.dirty
param.dirty = False
if dirty:
self._prj.pv = None, *pv
self._prj.set()
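# Editorial usage sketch (illustrative addition, not part of this module's
# public API; uses the Pix2Sky_ZenithalPerspective class defined later in this
# module): projection parameters are mirrored into the lazily synchronized
# WCSLIB ``prjprm`` structure, with PV1 carrying mu for the AZP projection.
def _azp_prjprm_sync_sketch():
    model = Pix2Sky_ZenithalPerspective(mu=2.0, gamma=30.0)
    return float(model.prjprm.pv[1])   # expected: 2.0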
class Pix2SkyProjection(Projection):
"""Base class for all Pix2Sky projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split('_')[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# with no parameters:
self._prj.set()
self.inputs = ('x', 'y')
self.outputs = ('phi', 'theta')
@property
def input_units(self):
return {self.inputs[0]: u.deg,
self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg,
self.outputs[1]: u.deg}
def evaluate(self, x, y, *args, **kwargs):
self._update_prj()
return self._prj.prjx2s(x, y)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
class Sky2PixProjection(Projection):
"""Base class for all Sky2Pix projections."""
n_inputs = 2
n_outputs = 2
_input_units_strict = True
_input_units_allow_dimensionless = True
def __new__(cls, *args, **kwargs):
long_name = cls.name.split('_')[1]
cls.prj_code = _PROJ_NAME_CODE_MAP[long_name]
return super().__new__(cls)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._prj.code = self.prj_code
self._update_prj()
if not self.param_names:
# force initial call to Prjprm.set() for projections
# without parameters:
self._prj.set()
self.inputs = ('phi', 'theta')
self.outputs = ('x', 'y')
@property
def input_units(self):
return {self.inputs[0]: u.deg,
self.inputs[1]: u.deg}
@property
def return_units(self):
return {self.outputs[0]: u.deg,
self.outputs[1]: u.deg}
def evaluate(self, phi, theta, *args, **kwargs):
self._update_prj()
return self._prj.prjs2x(phi, theta)
@property
def inverse(self):
pv = [getattr(self, param).value for param in self.param_names]
return self._inv_cls(*pv)
class Zenithal(Projection):
r"""Base class for all Zenithal projections.
Zenithal (or azimuthal) projections map the sphere directly onto a
plane. All zenithal projections are specified by defining the
radius as a function of native latitude, :math:`R_\theta`.
The pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg(-y, x) \\
R_\theta &= \sqrt{x^2 + y^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin \phi \\
y &= R_\theta \cos \phi
"""
class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Zenithal perspective projection - pixel to sky.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
\phi &= \arg(-y \cos \gamma, x) \\
\theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right.
where:
.. math::
\psi &= \arg(\rho, 1) \\
\omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\
\rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\
R &= \sqrt{x^2 + y^2 \cos^2 \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian,
description="Look angle γ in degrees (Default = 0°)")
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1")
class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
x &= R \sin \phi \\
y &= -R \sec \gamma \cos \theta
where:
.. math::
R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta}
{(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = _ParameterDS(
default=0.0,
description="Distance from point of projection to center of sphere"
)
gamma = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian,
description="Look angle γ in degrees (Default=0°)")
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1")
class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Slant zenithal perspective projection - pixel to sky.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0,
description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0, getter=_to_orig_unit, setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees (Default=0°)"
)
theta0 = _ParameterDS(
default=90.0, getter=_to_orig_unit, setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees (Default=90°)"
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1")
class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
mu = _ParameterDS(
default=0.0, description="Distance from point of projection to center of sphere"
)
phi0 = _ParameterDS(
default=0.0, getter=_to_orig_unit, setter=_to_radian,
description="The longitude φ₀ of the reference point in degrees"
)
theta0 = _ParameterDS(
default=0.0, getter=_to_orig_unit, setter=_to_radian,
description="The latitude θ₀ of the reference point, in degrees"
)
@mu.validator
def mu(self, value):
if np.any(np.equal(value, -1.0)):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1")
class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal):
r"""
Gnomonic projection - pixel to sky.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
"""
class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
r"""
Gnomonic Projection - sky to pixel.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
"""
class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
r"""
Stereographic Projection - pixel to sky.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
"""
class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
r"""
Stereographic Projection - sky to pixel.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
"""
class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
r"""
Slant orthographic projection - pixel to sky.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
\theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)
The parameters :math:`\xi` and :math:`\eta` are defined from the
reference point :math:`(\phi_c, \theta_c)` as:
.. math::
\xi &= \cot \theta_c \sin \phi_c \\
\eta &= - \cot \theta_c \cos \phi_c
Parameters
----------
xi : float
Obliqueness parameter, ξ. Default is 0.0.
eta : float
Obliqueness parameter, η. Default is 0.0.
"""
xi = _ParameterDS(default=0.0, description="Obliqueness parameter")
eta = _ParameterDS(default=0.0, description="Obliqueness parameter")
class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
r"""
Slant orthographic projection - sky to pixel.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cos \theta
    More generally, the transformation is:
.. math::
x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
"""
xi = _ParameterDS(default=0.0)
eta = _ParameterDS(default=0.0)
class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
r"""
Zenithal equidistant projection - pixel to sky.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - R_\theta
"""
class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
r"""
Zenithal equidistant projection - sky to pixel.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = 90^\circ - \theta
"""
class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
r"""
    Zenithal equal area projection - pixel to sky.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
"""
class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
r"""
    Zenithal equal area projection - sky to pixel.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
&= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
"""
class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
r"""
Airy projection - pixel to sky.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(default=90.0)
class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
r"""
    Airy projection - sky to pixel.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} +
\frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)
where:
.. math::
\xi &= \frac{90^\circ - \theta}{2} \\
\xi_b &= \frac{90^\circ - \theta_b}{2}
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = _ParameterDS(default=90.0,
description="The latitude at which to minimize the error,in degrees")
class Cylindrical(Projection):
r"""Base class for Cylindrical projections.
Cylindrical projections are so-named because the surface of
projection is a cylinder.
"""
_separable = True
class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical perspective - pixel to sky.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\lambda} \\
        \theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)
where:
.. math::
\eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(default=1.0)
lam = _ParameterDS(default=1.0)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError(
"CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError(
"CYP projection is not defined for lambda = -mu")
class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
r"""
Cylindrical Perspective - sky to pixel.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
x &= \lambda \phi \\
y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
        projected surface, in spherical radii, μ. Default is 1.
    lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(default=1.0, description="Distance from center of sphere in spherical radii")
lam = _ParameterDS(default=1.0, description="Radius of the cylinder in spherical radii")
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError(
"CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError(
"CYP projection is not defined for lambda = -mu")
class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical equal area projection - pixel to sky.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)
Parameters
----------
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical):
r"""
Cylindrical equal area projection - sky to pixel.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda}
Parameters
----------
lam : float
        Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = _ParameterDS(default=1)
class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical):
r"""
Plate carrée projection - pixel to sky.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= y
"""
@staticmethod
def evaluate(x, y):
# The intermediate variables are only used here for clarity
phi = np.array(x)
theta = np.array(y)
return phi, theta
class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical):
r"""
Plate carrée projection - sky to pixel.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \theta
"""
@staticmethod
def evaluate(phi, theta):
# The intermediate variables are only used here for clarity
x = np.array(phi)
y = np.array(theta)
return x, y
class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical):
r"""
Mercator - pixel to sky.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ}
"""
class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical):
r"""
Mercator - sky to pixel.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right)
"""
class PseudoCylindrical(Projection):
r"""Base class for pseudocylindrical projections.
Pseudocylindrical projections are like cylindrical projections
except the parallels of latitude are projected at diminishing
lengths toward the polar regions in order to reduce lateral
distortion there. Consequently, the meridians are curved.
"""
_separable = True
class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - pixel to sky.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\cos y} \\
\theta &= y
"""
class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - sky to pixel.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
x &= \phi \cos \theta \\
y &= \theta
"""
class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical):
r"""
Parabolic projection - pixel to sky.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
        \phi &= \frac{x}{1 - 4(y / 180^\circ)^2} \\
\theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right)
"""
class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical):
r"""
Parabolic projection - sky to pixel.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\
y &= 180^\circ \sin \frac{\theta}{3}
"""
class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical):
r"""
Molleweide's projection - pixel to sky.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\
\theta &= \sin^{-1}\left(
\frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right)
+ \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}
\right)
"""
class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical):
r"""
Molleweide's projection - sky to pixel.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\
y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma
where :math:`\gamma` is defined as the solution of the
transcendental equation:
.. math::
\sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi}
"""
class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - pixel to sky.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
\phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right)
"""
class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - sky to pixel.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\
y &= \gamma \sin \theta
where:
.. math::
\gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}}
"""
class Conic(Projection):
r"""Base class for conic projections.
In conic projections, the sphere is thought to be projected onto
the surface of a cone which is then opened out.
In a general sense, the pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\
R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin (C \phi) \\
        y &= -R_\theta \cos (C \phi) + Y_0
where :math:`C` is the "constant of the cone":
.. math::
C = \frac{180^\circ \cos \theta}{\pi R_\theta}
"""
sigma = _ParameterDS(default=90.0, getter=_to_orig_unit, setter=_to_radian)
delta = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic):
r"""
Colles' conic perspective projection - pixel to sky.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic):
r"""
Colles' conic perspective projection - sky to pixel.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic):
r"""
    Albers' conic equal area projection - pixel to sky.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic):
r"""
    Albers' conic equal area projection - sky to pixel.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma}
\sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic):
r"""
Conic equidistant projection - pixel to sky.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
        Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic):
r"""
Conic equidistant projection - sky to pixel.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
        Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic):
r"""
Conic orthomorphic projection - pixel to sky.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic):
r"""
Conic orthomorphic projection - sky to pixel.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulae are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
class PseudoConic(Projection):
r"""Base class for pseudoconic projections.
Pseudoconics are a subclass of conics with concentric parallels.
"""
class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - pixel to sky.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\
\theta &= Y_0 - R_\theta
where:
.. math::
R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\
A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right)
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
_separable = True
theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian)
class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - sky to pixel.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
x &= R_\theta \sin A_\phi \\
y &= -R_\theta \cos A_\phi + Y_0
where:
.. math::
A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\
R_\theta &= Y_0 - \theta \\
Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
_separable = True
theta1 = _ParameterDS(default=0.0, getter=_to_orig_unit, setter=_to_radian,
description="Bonne conformal latitude, in degrees")
class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic):
r"""
Polyconic projection - pixel to sky.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic):
r"""
Polyconic projection - sky to pixel.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
class QuadCube(Projection):
r"""Base class for quad cube projections.
Quadrilateralized spherical cube (quad-cube) projections belong to
the class of polyhedral projections in which the sphere is
projected onto the surface of an enclosing polyhedron.
The six faces of the quad-cube projections are numbered and laid
out as::
0
4 3 2 1 4 3 2
5
"""
class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Tangential spherical cube projection - pixel to sky.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube):
r"""
Tangential spherical cube projection - sky to pixel.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
class HEALPix(Projection):
r"""Base class for HEALPix projections.
"""
class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix):
r"""
HEALPix - pixel to sky.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = _ParameterDS(default=4.0, description="The number of facets in longitude direction.")
X = _ParameterDS(default=3.0, description="The number of facets in latitude direction.")
class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix):
r"""
HEALPix projection - sky to pixel.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in longitude direction.
X : float
The number of facets in latitude direction.
"""
_separable = True
H = _ParameterDS(default=4.0, description="The number of facets in longitude direction.")
X = _ParameterDS(default=3.0, description="The number of facets in latitude direction.")
class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
class AffineTransformation2D(Model):
"""
Perform an affine transformation in 2 dimensions.
Parameters
----------
matrix : array
A 2x2 matrix specifying the linear transformation to apply to the
inputs
translation : array
A 2D vector (given as either a 2x1 or 1x2 array) specifying a
translation to apply to the inputs
"""
n_inputs = 2
n_outputs = 2
standard_broadcasting = False
_separable = False
matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]])
translation = Parameter(default=[0.0, 0.0])
@matrix.validator
def matrix(self, value):
"""Validates that the input matrix is a 2x2 2D array."""
if np.shape(value) != (2, 2):
raise InputParameterError(
"Expected transformation matrix to be a 2x2 array")
@translation.validator
def translation(self, value):
"""
Validates that the translation vector is a 2D vector. This allows
either a "row" vector or a "column" vector where in the latter case the
resultant Numpy array has ``ndim=2`` but the shape is ``(1, 2)``.
"""
if not ((np.ndim(value) == 1 and np.shape(value) == (2,)) or
(np.ndim(value) == 2 and np.shape(value) == (1, 2))):
raise InputParameterError(
"Expected translation vector to be a 2 element row or column "
"vector array")
def __init__(self, matrix=matrix, translation=translation, **kwargs):
super().__init__(matrix=matrix, translation=translation, **kwargs)
self.inputs = ("x", "y")
self.outputs = ("x", "y")
@property
def inverse(self):
"""
Inverse transformation.
Raises `~astropy.modeling.InputParameterError` if the transformation cannot be inverted.
"""
det = np.linalg.det(self.matrix.value)
if det == 0:
raise InputParameterError(
f"Transformation matrix is singular; {self.__class__.__name__} model does not "
"have an inverse")
matrix = np.linalg.inv(self.matrix.value)
if self.matrix.unit is not None:
matrix = matrix * self.matrix.unit
# If matrix has unit then translation has unit, so no need to assign it.
translation = -np.dot(matrix, self.translation.value)
return self.__class__(matrix=matrix, translation=translation)
@classmethod
def evaluate(cls, x, y, matrix, translation):
"""
Apply the transformation to a set of 2D Cartesian coordinates given as
        two lists--one for the x coordinates and one for the y coordinates--or a
single coordinate pair.
Parameters
----------
x, y : array, float
x and y coordinates
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
shape = x.shape or (1,)
        # Use asarray to ensure we lose the units.
inarr = np.vstack([np.asarray(x).ravel(),
np.asarray(y).ravel(),
np.ones(x.size, x.dtype)])
if inarr.shape[0] != 3 or inarr.ndim != 2:
raise ValueError("Incompatible input shapes")
augmented_matrix = cls._create_augmented_matrix(matrix, translation)
result = np.dot(augmented_matrix, inarr)
x, y = result[0], result[1]
x.shape = y.shape = shape
return x, y
@staticmethod
def _create_augmented_matrix(matrix, translation):
unit = None
if any([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]):
if not all([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]):
raise ValueError("To use AffineTransformation with quantities, "
"both matrix and unit need to be quantities.")
unit = translation.unit
# matrix should have the same units as translation
if not (matrix.unit / translation.unit) == u.dimensionless_unscaled:
raise ValueError("matrix and translation must have the same units.")
augmented_matrix = np.empty((3, 3), dtype=float)
augmented_matrix[0:2, 0:2] = matrix
augmented_matrix[0:2, 2:].flat = translation
augmented_matrix[2] = [0, 0, 1]
if unit is not None:
return augmented_matrix * unit
return augmented_matrix
@property
def input_units(self):
if self.translation.unit is None and self.matrix.unit is None:
return None
elif self.translation.unit is not None:
return dict(zip(self.inputs, [self.translation.unit] * 2))
else:
return dict(zip(self.inputs, [self.matrix.unit] * 2))
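# --- Editor's note: illustrative sketch, not part of the astropy source. -----
# evaluate() and _create_augmented_matrix() above implement the affine map as a
# single 3x3 matrix acting on homogeneous coordinates [x, y, 1], and inverse()
# uses M**-1 together with -(M**-1) @ t.  A pure-numpy restatement of that
# technique, independent of the Model machinery:
def _sketch_affine_homogeneous():
    import numpy as np

    matrix = np.array([[2.0, 0.5], [0.0, 1.5]])
    translation = np.array([1.0, -3.0])
    aug = np.eye(3)
    aug[:2, :2] = matrix
    aug[:2, 2] = translation                        # [[M, t], [0, 0, 1]]

    pts = np.array([[0.0, 1.0, 2.5],                # x coordinates
                    [0.0, -1.0, 4.0],               # y coordinates
                    [1.0, 1.0, 1.0]])               # homogeneous ones
    fwd = aug @ pts                                 # forward transform

    inv_matrix = np.linalg.inv(matrix)
    inv_translation = -inv_matrix @ translation     # same rule as inverse() above
    back = inv_matrix @ fwd[:2] + inv_translation[:, None]
    assert np.allclose(back, pts[:2])
    return fwd[:2]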
for long_name, short_name in _PROJ_NAME_CODE:
# define short-name projection equivalent classes:
globals()['Pix2Sky_' + short_name] = globals()['Pix2Sky_' + long_name]
globals()['Sky2Pix_' + short_name] = globals()['Sky2Pix_' + long_name]
# set inverse classes:
globals()['Pix2Sky_' + long_name]._inv_cls = globals()['Sky2Pix_' + long_name]
globals()['Sky2Pix_' + long_name]._inv_cls = globals()['Pix2Sky_' + long_name]
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains models representing polynomials and polynomial series.
"""
# pylint: disable=invalid-name
import numpy as np
from astropy.utils import check_broadcast, indent
from .core import FittableModel, Model
from .functional_models import Shift
from .parameters import Parameter
from .utils import _validate_domain_window, comb, poly_map_domain
__all__ = [
'Chebyshev1D', 'Chebyshev2D', 'Hermite1D', 'Hermite2D',
'InverseSIP', 'Legendre1D', 'Legendre2D', 'Polynomial1D',
'Polynomial2D', 'SIP', 'OrthoPolynomialBase',
'PolynomialModel'
]
class PolynomialBase(FittableModel):
"""
Base class for all polynomial-like models with an arbitrary number of
parameters in the form of coefficients.
In this case Parameter instances are returned through the class's
``__getattr__`` rather than through class descriptors.
"""
# Default _param_names list; this will be filled in by the implementation's
# __init__
_param_names = ()
linear = True
col_fit_deriv = False
@property
def param_names(self):
"""Coefficient names generated based on the model's polynomial degree
and number of dimensions.
Subclasses should implement this to return parameter names in the
desired format.
On most `Model` classes this is a class attribute, but for polynomial
models it is an instance attribute since each polynomial model instance
can have different parameters depending on the degree of the polynomial
and the number of dimensions, for example.
"""
return self._param_names
class PolynomialModel(PolynomialBase):
"""
Base class for polynomial models.
Its main purpose is to determine how many coefficients are needed
based on the polynomial order and dimension and to provide their
default values, names and ordering.
"""
def __init__(self, degree, n_models=None, model_set_axis=None,
name=None, meta=None, **params):
self._degree = degree
self._order = self.get_num_coeff(self.n_inputs)
self._param_names = self._generate_coeff_names(self.n_inputs)
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(param_name, default=np.zeros(minshape))
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name,
meta=meta, **params)
@property
def degree(self):
"""Degree of polynomial."""
return self._degree
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one parameter set
"""
if self.degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
# deg+1 is used to account for the difference between iraf using
# degree and numpy using exact degree
if ndim != 1:
nmixed = comb(self.degree, ndim)
else:
nmixed = 0
numc = self.degree * ndim + nmixed + 1
return numc
def _invlex(self):
c = []
lencoeff = self.degree + 1
for i in range(lencoeff):
for j in range(lencoeff):
if i + j <= self.degree:
c.append((j, i))
return c[::-1]
def _generate_coeff_names(self, ndim):
names = []
if ndim == 1:
for n in range(self._order):
names.append(f'c{n}')
else:
for i in range(self.degree + 1):
names.append(f'c{i}_{0}')
for i in range(1, self.degree + 1):
names.append(f'c{0}_{i}')
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j < self.degree + 1:
names.append(f'c{i}_{j}')
return tuple(names)
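# --- Editor's note: illustrative sketch, not part of the astropy source. -----
# For a 2D polynomial, get_num_coeff() above computes
# numc = degree*ndim + comb(degree, ndim) + 1, which for ndim=2 reduces to the
# binomial count (degree + 1)(degree + 2)/2 of monomials x**i * y**j with
# i + j <= degree.  A standalone check using math.comb (equivalent to the
# module's own comb helper for these arguments):
def _sketch_num_coeff_2d(degree=4):
    from math import comb as _comb

    ndim = 2
    numc = degree * ndim + _comb(degree, ndim) + 1        # mirrors get_num_coeff
    closed_form = (degree + 1) * (degree + 2) // 2
    brute_force = sum(1 for i in range(degree + 1)
                      for j in range(degree + 1) if i + j <= degree)
    assert numc == closed_form == brute_force
    return numc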
class _PolyDomainWindow1D(PolynomialModel):
"""
This class sets ``domain`` and ``window`` of 1D polynomials.
"""
def __init__(self, degree, domain=None, window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
degree, n_models, model_set_axis, name=name, meta=meta, **params)
self._set_default_domain_window(domain, window)
@property
def window(self):
return self._window
@window.setter
def window(self, val):
self._window = _validate_domain_window(val)
@property
def domain(self):
return self._domain
@domain.setter
def domain(self, val):
self._domain = _validate_domain_window(val)
def _set_default_domain_window(self, domain, window):
"""
This method sets the ``domain`` and ``window`` attributes on 1D subclasses.
"""
self._default_domain_window = {'domain': None,
'window': (-1, 1)
}
self.window = window or (-1, 1)
self.domain = domain
def __repr__(self):
return self._format_repr([self.degree],
kwargs={'domain': self.domain, 'window': self.window},
defaults=self._default_domain_window
)
def __str__(self):
return self._format_str([('Degree', self.degree),
('Domain', self.domain),
('Window', self.window)],
self._default_domain_window)
class OrthoPolynomialBase(PolynomialBase):
"""
This is a base class for the 2D Chebyshev and Legendre models.
The polynomials implemented here require a maximum degree in x and y.
    For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <astropy:domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
x_window : tuple or None, optional
range of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
y_window : tuple or None, optional
range of the y independent variable
**params : dict
{keyword: value} pairs, representing {parameter_name: value}
"""
n_inputs = 2
n_outputs = 1
def __init__(self, x_degree, y_degree, x_domain=None, x_window=None,
y_domain=None, y_window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.x_degree = x_degree
self.y_degree = y_degree
self._order = self.get_num_coeff()
        # Set the ``x/y_domain`` and ``x/y_window`` attributes in subclasses.
self._default_domain_window = {
'x_window': (-1, 1),
'y_window': (-1, 1),
'x_domain': None,
'y_domain': None
}
self.x_window = x_window or self._default_domain_window['x_window']
self.y_window = y_window or self._default_domain_window['y_window']
self.x_domain = x_domain
self.y_domain = y_domain
self._param_names = self._generate_coeff_names()
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(param_name, default=np.zeros(minshape))
super().__init__(
n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
@property
def x_domain(self):
return self._x_domain
@x_domain.setter
def x_domain(self, val):
self._x_domain = _validate_domain_window(val)
@property
def y_domain(self):
return self._y_domain
@y_domain.setter
def y_domain(self, val):
self._y_domain = _validate_domain_window(val)
@property
def x_window(self):
return self._x_window
@x_window.setter
def x_window(self, val):
self._x_window = _validate_domain_window(val)
@property
def y_window(self):
return self._y_window
@y_window.setter
def y_window(self, val):
self._y_window = _validate_domain_window(val)
def __repr__(self):
return self._format_repr([self.x_degree, self.y_degree],
kwargs={'x_domain': self.x_domain,
'y_domain': self.y_domain,
'x_window': self.x_window,
'y_window': self.y_window},
defaults=self._default_domain_window)
def __str__(self):
return self._format_str(
[('X_Degree', self.x_degree),
('Y_Degree', self.y_degree),
('X_Domain', self.x_domain),
('Y_Domain', self.y_domain),
('X_Window', self.x_window),
('Y_Window', self.y_window)],
self._default_domain_window)
def get_num_coeff(self):
"""
Determine how many coefficients are needed
Returns
-------
numc : int
number of coefficients
"""
if self.x_degree < 0 or self.y_degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
return (self.x_degree + 1) * (self.y_degree + 1)
def _invlex(self):
# TODO: This is a very slow way to do this; fix it and related methods
# like _alpha
c = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
c.append((i, j))
return np.array(c[::-1])
def invlex_coeff(self, coeffs):
invlex_coeffs = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
name = f'c{i}_{j}'
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return np.array(invlex_coeffs[::-1])
def _alpha(self):
invlexdeg = self._invlex()
invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1
nx = self.x_degree + 1
ny = self.y_degree + 1
alpha = np.zeros((ny * nx + 3, ny + nx))
for n in range(len(invlexdeg)):
alpha[n][invlexdeg[n]] = [1, 1]
alpha[-2, 0] = 1
alpha[-3, nx] = 1
return alpha
def imhorner(self, x, y, coeff):
_coeff = list(coeff)
_coeff.extend([0, 0, 0])
alpha = self._alpha()
r0 = _coeff[0]
nalpha = len(alpha)
karr = np.diff(alpha, axis=0)
kfunc = self._fcache(x, y)
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
nterms = x_terms + y_terms
for n in range(1, nterms + 1 + 3):
setattr(self, 'r' + str(n), 0.)
for n in range(1, nalpha):
k = karr[n - 1].nonzero()[0].max() + 1
rsum = 0
for i in range(1, k + 1):
rsum = rsum + getattr(self, 'r' + str(i))
val = kfunc[k - 1] * (r0 + rsum)
setattr(self, 'r' + str(k), val)
r0 = _coeff[n]
for i in range(1, k):
setattr(self, 'r' + str(i), 0.)
result = r0
for i in range(1, nterms + 1 + 3):
result = result + getattr(self, 'r' + str(i))
return result
def _generate_coeff_names(self):
names = []
for j in range(self.y_degree + 1):
for i in range(self.x_degree + 1):
names.append(f'c{i}_{j}')
return tuple(names)
def _fcache(self, x, y):
"""
        Compute and store the individual functions.
        To be implemented by subclasses.
"""
raise NotImplementedError("Subclasses should implement this")
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
return self.imhorner(x, y, invcoeff)
def prepare_inputs(self, x, y, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
return (x, y), broadcasted_shapes
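# --- Editor's note: illustrative sketch, not part of the astropy source. -----
# OrthoPolynomialBase stores (x_degree + 1) * (y_degree + 1) coefficients named
# c{i}_{j}, generated with the y index as the outer loop, exactly as in
# _generate_coeff_names() above.  For x_degree=2, y_degree=1 this gives:
def _sketch_ortho_coeff_names(x_degree=2, y_degree=1):
    names = [f'c{i}_{j}'
             for j in range(y_degree + 1)          # outer loop over y degree
             for i in range(x_degree + 1)]         # inner loop over x degree
    # -> ['c0_0', 'c1_0', 'c2_0', 'c0_1', 'c1_1', 'c2_1']
    assert len(names) == (x_degree + 1) * (y_degree + 1)
    return names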
class Chebyshev1D(_PolyDomainWindow1D):
r"""
Univariate Chebyshev series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x)
where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind.
For explanation of ```domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window.
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x - since the
coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with
units, 2x^2 and -1 would have incompatible units.
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(self, degree, domain=None, window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(degree, domain=domain, window=window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x2 - v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
"""Evaluates the polynomial using Clenshaw's algorithm."""
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
x2 = 2 * x
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
c0 = coeffs[-i] - c1
c1 = tmp + c1 * x2
return c0 + c1 * x
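# --- Editor's note: illustrative sketch, not part of the astropy source. -----
# The Clenshaw recurrence above evaluates sum_i c_i * T_i(x) without building
# the T_i explicitly.  Since clenshaw() is a plain staticmethod it can be
# compared directly against numpy's reference Chebyshev evaluator:
def _sketch_chebyshev_clenshaw():
    import numpy as np

    coeffs = (1.0, -2.0, 0.5, 3.0)                   # c0, c1, c2, c3
    x = np.linspace(-1.0, 1.0, 7)
    ours = Chebyshev1D.clenshaw(x, coeffs)
    reference = np.polynomial.chebyshev.chebval(x, coeffs)
    assert np.allclose(ours, reference)
    return ours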
class Hermite1D(_PolyDomainWindow1D):
r"""
Univariate Hermite series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x)
where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind").
For explanation of ``domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units,
4x^2 and -2 would have incompatible units.
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(self, degree, domain=None, window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
degree, domain, window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = 2 * x
for i in range(2, self.degree + 1):
v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
x2 = x * 2
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
temp = c0
nd = nd - 1
c0 = coeffs[-i] - c1 * (2 * (nd - 1))
c1 = temp + c1 * x2
return c0 + c1 * x2
class Hermite2D(OrthoPolynomialBase):
r"""
Bivariate Hermite series.
It is defined as
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y)
where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x and/or y - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Hermite polynomial (H2) is 4x^2-2, but if x was
specified with units, 4x^2 and -2 would have incompatible units.
"""
_separable = False
def __init__(self, x_degree, y_degree, x_domain=None, x_window=None,
y_domain=None, y_window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
x_window=x_window, y_window=y_window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def _fcache(self, x, y):
"""
Calculate the individual Hermite functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = 2 * x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = 2 * y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Hermite polynomials:
.. math::
H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._hermderiv1d(x, self.x_degree + 1).T
y_deriv = self._hermderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _hermderiv1d(self, x, deg):
"""
Derivative of 1D Hermite series
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x2
for i in range(2, deg + 1):
d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre1D(_PolyDomainWindow1D):
r"""
Univariate Legendre series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_i(x)`` is the corresponding Legendre polynomial.
For explanation of ``domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(self, degree, domain=None, window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
degree, domain, window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def clenshaw(x, coeffs):
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
nd = nd - 1
c0 = coeffs[-i] - (c1 * (nd - 1)) / nd
c1 = tmp + (c1 * x * (2 * nd - 1)) / nd
return c0 + c1 * x
class Polynomial1D(_PolyDomainWindow1D):
r"""
1D Polynomial model.
It is defined as:
.. math::
P = \sum_{i=0}^{i=n}C_{i} * x^{i}
For explanation of ``domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
If None, it is set to (-1, 1)
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(self, degree, domain=None, window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
degree, domain, window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
# Set domain separately because it's different from
# the orthogonal polynomials.
self._default_domain_window = {'domain': (-1, 1),
'window': (-1, 1),
}
self.domain = domain or self._default_domain_window['domain']
self.window = window or self._default_domain_window['window']
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.horner(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
v = np.empty((self.degree + 1,) + x.shape, dtype=float)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def horner(x, coeffs):
if len(coeffs) == 1:
c0 = coeffs[-1] * np.ones_like(x, subok=False)
else:
c0 = coeffs[-1]
for i in range(2, len(coeffs) + 1):
c0 = coeffs[-i] + c0 * x
return c0
@property
def input_units(self):
if self.degree == 0 or self.c1.unit is None:
return None
else:
return {self.inputs[0]: self.c0.unit / self.c1.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = {}
for i in range(self.degree + 1):
par = getattr(self, f'c{i}')
mapping[par.name] = outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i
return mapping
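# --- Editor's note: illustrative sketch, not part of the astropy source. -----
# Polynomial1D.horner() evaluates c0 + c1*x + ... + cn*x**n with coefficients
# ordered from degree 0 upward; numpy's polyval expects the reverse order, so
# the two agree once the coefficient tuple is flipped:
def _sketch_horner_vs_polyval():
    import numpy as np

    coeffs = (2.0, -1.0, 0.25)                       # c0, c1, c2
    x = np.linspace(-3.0, 3.0, 5)
    ours = Polynomial1D.horner(x, coeffs)
    reference = np.polyval(coeffs[::-1], x)          # highest degree first
    assert np.allclose(ours, reference)
    return ours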
class Polynomial2D(PolynomialModel):
r"""
2D Polynomial model.
Represents a general polynomial of degree n:
.. math::
P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n
+ c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ... + c_{(n-1)1}x^{n-1}y
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
Polynomial degree: largest sum of exponents (:math:`i + j`) of
variables in each monomial term of the form :math:`x^i y^j`. The
number of terms in a 2D polynomial of degree ``n`` is given by binomial
coefficient :math:`C(n + 2, 2) = (n + 2)! / (2!\,n!) = (n + 1)(n + 2) / 2`.
x_domain : tuple or None, optional
domain of the x independent variable
If None, it is set to (-1, 1)
y_domain : tuple or None, optional
domain of the y independent variable
If None, it is set to (-1, 1)
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the x_domain to x_window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the y_domain to y_window
**params : dict
keyword: value pairs, representing parameter_name: value
"""
n_inputs = 2
n_outputs = 1
_separable = False
def __init__(self, degree, x_domain=None, y_domain=None,
x_window=None, y_window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
self._default_domain_window = {
'x_domain': (-1, 1),
'y_domain': (-1, 1),
'x_window': (-1, 1),
'y_window': (-1, 1)
}
self.x_domain = x_domain or self._default_domain_window['x_domain']
self.y_domain = y_domain or self._default_domain_window['y_domain']
self.x_window = x_window or self._default_domain_window['x_window']
self.y_window = y_window or self._default_domain_window['y_window']
def prepare_inputs(self, x, y, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
return (x, y), broadcasted_shapes
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
result = self.multivariate_horner(x, y, invcoeff)
# Special case for degree==0 to ensure that the shape of the output is
# still as expected by the broadcasting rules, even though the x and y
# inputs are not used in the evaluation
if self.degree == 0:
output_shape = check_broadcast(np.shape(coeffs[0]), x.shape)
if output_shape:
new_result = np.empty(output_shape)
new_result[:] = result
result = new_result
return result
def __repr__(self):
return self._format_repr([self.degree],
kwargs={'x_domain': self.x_domain,
'y_domain': self.y_domain,
'x_window': self.x_window,
'y_window': self.y_window},
defaults=self._default_domain_window)
def __str__(self):
return self._format_str([('Degree', self.degree),
('X_Domain', self.x_domain),
('Y_Domain', self.y_domain),
('X_Window', self.x_window),
('Y_Window', self.y_window)],
self._default_domain_window)
def fit_deriv(self, x, y, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.ndim == 2:
x = x.flatten()
if y.ndim == 2:
y = y.flatten()
if x.size != y.size:
raise ValueError('Expected x and y to be of equal size')
designx = x[:, None] ** np.arange(self.degree + 1)
designy = y[:, None] ** np.arange(1, self.degree + 1)
designmixed = []
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j <= self.degree:
designmixed.append((x ** i) * (y ** j))
designmixed = np.array(designmixed).T
if designmixed.any():
v = np.hstack([designx, designy, designmixed])
else:
v = np.hstack([designx, designy])
return v
def invlex_coeff(self, coeffs):
invlex_coeffs = []
lencoeff = range(self.degree + 1)
for i in lencoeff:
for j in lencoeff:
if i + j <= self.degree:
name = f'c{j}_{i}'
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return invlex_coeffs[::-1]
def multivariate_horner(self, x, y, coeffs):
"""
Multivariate Horner's scheme
Parameters
----------
x, y : array
coeffs : array
Coefficients in inverse lexical order.
"""
alpha = self._invlex()
r0 = coeffs[0]
r1 = r0 * 0.0
r2 = r0 * 0.0
karr = np.diff(alpha, axis=0)
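        # karr holds the change in exponents between consecutive terms of the
        # inverse-lexical ordering: a nonzero change in column 1 folds the
        # accumulated sum into the y branch (multiply by y), otherwise into
        # the x branch (multiply by x).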
for n in range(len(karr)):
if karr[n, 1] != 0:
r2 = y * (r0 + r1 + r2)
r1 = np.zeros_like(coeffs[0], subok=False)
else:
r1 = x * (r0 + r1)
r0 = coeffs[n + 1]
return r0 + r1 + r2
@property
def input_units(self):
if self.degree == 0 or (self.c1_0.unit is None and self.c0_1.unit is None):
return None
return {self.inputs[0]: self.c0_0.unit / self.c1_0.unit,
self.inputs[1]: self.c0_0.unit / self.c0_1.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = {}
for i in range(self.degree + 1):
for j in range(self.degree + 1):
if i + j > 2:
continue
par = getattr(self, f'c{i}_{j}')
mapping[par.name] = (outputs_unit[self.outputs[0]]
/ inputs_unit[self.inputs[0]] ** i
/ inputs_unit[self.inputs[1]] ** j)
return mapping
@property
def x_domain(self):
return self._x_domain
@x_domain.setter
def x_domain(self, val):
self._x_domain = _validate_domain_window(val)
@property
def y_domain(self):
return self._y_domain
@y_domain.setter
def y_domain(self, val):
self._y_domain = _validate_domain_window(val)
@property
def x_window(self):
return self._x_window
@x_window.setter
def x_window(self, val):
self._x_window = _validate_domain_window(val)
@property
def y_window(self):
return self._y_window
@y_window.setter
def y_window(self, val):
self._y_window = _validate_domain_window(val)
class Chebyshev2D(OrthoPolynomialBase):
r"""
    Bivariate Chebyshev series.
    It is defined as
    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d} C_{nm} T_n(x) T_m(y)
where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the first kind.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x and/or y - since
the coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was
specified with units, 2x^2 and -1 would have incompatible units.
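    Examples
    --------
    A minimal evaluation sketch with illustrative coefficients (for degree 1
    in each variable the Chebyshev terms are just ``1, x, y, xy``):
    >>> from astropy.modeling.models import Chebyshev2D
    >>> p = Chebyshev2D(x_degree=1, y_degree=1, c0_0=1, c1_0=2, c0_1=3, c1_1=4)
    >>> float(p(0.5, 0.5))  # doctest: +FLOAT_CMP
    4.5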
"""
_separable = False
def __init__(self, x_degree, y_degree, x_domain=None, x_window=None,
y_domain=None, y_window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
x_window=x_window, y_window=y_window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def _fcache(self, x, y):
"""
Calculate the individual Chebyshev functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
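        # Higher orders follow from the Chebyshev recurrence
        # T_n(t) = 2 t T_{n-1}(t) - T_{n-2}(t), applied first in x, then in y.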
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Chebyshev polynomials:
.. math::
T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._chebderiv1d(x, self.x_degree + 1).T
y_deriv = self._chebderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _chebderiv1d(self, x, deg):
"""
Derivative of 1D Chebyshev series
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x
for i in range(2, deg + 1):
d[i] = d[i - 1] * x2 - d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre2D(OrthoPolynomialBase):
r"""
Bivariate Legendre series.
Defined as:
    .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d} C_{nm} L_n(x) L_m(y)
where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials.
For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window``
see :ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : tuple or None, optional
domain of the x independent variable
y_domain : tuple or None, optional
domain of the y independent variable
x_window : tuple or None, optional
range of the x independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
y_window : tuple or None, optional
range of the y independent variable
If None, it is set to (-1, 1)
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
    Model formula:
    .. math::
        P(x, y) = \sum_{n=0}^{d_x} \sum_{m=0}^{d_y} C_{nm} L_n(x) L_m(y)
    where ``L_n(x)`` and ``L_m(y)`` are the corresponding Legendre polynomials.
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
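    Examples
    --------
    A minimal evaluation sketch with illustrative coefficients (for degree 1
    in each variable the Legendre terms are just ``1, x, y, xy``):
    >>> from astropy.modeling.models import Legendre2D
    >>> p = Legendre2D(x_degree=1, y_degree=1, c0_0=1, c1_0=2, c0_1=3, c1_1=4)
    >>> float(p(0.25, 0.25))  # doctest: +FLOAT_CMP
    2.5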
"""
_separable = False
def __init__(self, x_degree, y_degree, x_domain=None, x_window=None,
y_domain=None, y_window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
x_window=x_window, y_window=y_window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def _fcache(self, x, y):
"""
Calculate the individual Legendre functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
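        # Higher orders follow from Bonnet's recursion,
        # n L_n(t) = (2n - 1) t L_{n-1}(t) - (n - 1) L_{n-2}(t),
        # applied first in x, then in y.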
for n in range(2, x_terms):
kfunc[n] = (((2 * (n - 1) + 1) * x * kfunc[n - 1] -
(n - 1) * kfunc[n - 2]) / n)
for n in range(2, y_terms):
kfunc[n + x_terms] = ((2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1] -
(n - 1) * kfunc[n + x_terms - 2]) / (n)
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Legendre polynomials:
        .. math::
            L_{x_0}L_{y_0}, L_{x_1}L_{y_0}...L_{x_n}L_{y_0}...L_{x_n}L_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._legendderiv1d(x, self.x_degree + 1).T
y_deriv = self._legendderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _legendderiv1d(self, x, deg):
"""Derivative of 1D Legendre polynomial"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1,) + x.shape, dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
d[1] = x
for i in range(2, deg + 1):
d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i
return np.rollaxis(d, 0, d.ndim)
class _SIP1D(PolynomialBase):
"""
This implements the Simple Imaging Polynomial Model (SIP) in 1D.
    It is unlikely to be used on its own in 1D, so this class is private;
    `SIP` should be used instead.
"""
n_inputs = 2
n_outputs = 1
_separable = False
def __init__(self, order, coeff_prefix, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.order = order
self.coeff_prefix = coeff_prefix
self._param_names = self._generate_coeff_names(coeff_prefix)
if n_models:
if model_set_axis is None:
model_set_axis = 0
minshape = (1,) * model_set_axis + (n_models,)
else:
minshape = ()
for param_name in self._param_names:
self._parameters_[param_name] = Parameter(param_name, default=np.zeros(minshape))
super().__init__(n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def __repr__(self):
return self._format_repr(args=[self.order, self.coeff_prefix])
def __str__(self):
return self._format_str(
[('Order', self.order),
('Coeff. Prefix', self.coeff_prefix)])
def evaluate(self, x, y, *coeffs):
# TODO: Rewrite this so that it uses a simpler method of determining
# the matrix based on the number of given coefficients.
mcoef = self._coeff_matrix(self.coeff_prefix, coeffs)
return self._eval_sip(x, y, mcoef)
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one param set
"""
if self.order < 2 or self.order > 9:
            raise ValueError("Degree of polynomial must be 2 <= deg <= 9")
nmixed = comb(self.order, ndim)
# remove 3 terms because SIP deg >= 2
numc = self.order * ndim + nmixed - 2
return numc
def _generate_coeff_names(self, coeff_prefix):
names = []
for i in range(2, self.order + 1):
names.append(f'{coeff_prefix}_{i}_{0}')
for i in range(2, self.order + 1):
names.append(f'{coeff_prefix}_{0}_{i}')
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
names.append(f'{coeff_prefix}_{i}_{j}')
return tuple(names)
def _coeff_matrix(self, coeff_prefix, coeffs):
mat = np.zeros((self.order + 1, self.order + 1))
for i in range(2, self.order + 1):
attr = f'{coeff_prefix}_{i}_{0}'
mat[i, 0] = coeffs[self.param_names.index(attr)]
for i in range(2, self.order + 1):
attr = f'{coeff_prefix}_{0}_{i}'
mat[0, i] = coeffs[self.param_names.index(attr)]
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
attr = f'{coeff_prefix}_{i}_{j}'
mat[i, j] = coeffs[self.param_names.index(attr)]
return mat
def _eval_sip(self, x, y, coef):
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
if self.coeff_prefix == 'A':
result = np.zeros(x.shape)
else:
result = np.zeros(y.shape)
for i in range(coef.shape[0]):
for j in range(coef.shape[1]):
if 1 < i + j < self.order + 1:
result = result + coef[i, j] * x ** i * y ** j
return result
class SIP(Model):
"""
Simple Imaging Polynomial (SIP) model.
The SIP convention is used to represent distortions in FITS image headers.
See [1]_ for a description of the SIP convention.
Parameters
----------
crpix : list or (2,) ndarray
CRPIX values
a_order : int
SIP polynomial order for first axis
b_order : int
        SIP polynomial order for second axis
a_coeff : dict
SIP coefficients for first axis
b_coeff : dict
SIP coefficients for the second axis
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
References
----------
.. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 347, 2005
<https://ui.adsabs.harvard.edu/abs/2005ASPC..347..491S>`_
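    Examples
    --------
    A minimal sketch with illustrative coefficients; the model evaluates the
    distortion terms ``f(u, v)`` and ``g(u, v)`` at the pixel offsets
    ``u = x - CRPIX1``, ``v = y - CRPIX2``:
    >>> from astropy.modeling.models import SIP
    >>> sip = SIP(crpix=[100, 100], a_order=2, b_order=2,
    ...           a_coeff={'A_2_0': 1e-4}, b_coeff={'B_0_2': 2e-4})
    >>> sip(110, 105)  # doctest: +SKIP
    (0.01, 0.005)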
"""
n_inputs = 2
n_outputs = 2
_separable = False
def __init__(self, crpix, a_order, b_order, a_coeff={}, b_coeff={},
ap_order=None, bp_order=None, ap_coeff={}, bp_coeff={},
n_models=None, model_set_axis=None, name=None, meta=None):
self._crpix = crpix
self._a_order = a_order
self._b_order = b_order
self._a_coeff = a_coeff
self._b_coeff = b_coeff
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
self.shift_a = Shift(-crpix[0])
self.shift_b = Shift(-crpix[1])
self.sip1d_a = _SIP1D(a_order, coeff_prefix='A', n_models=n_models,
model_set_axis=model_set_axis, **a_coeff)
self.sip1d_b = _SIP1D(b_order, coeff_prefix='B', n_models=n_models,
model_set_axis=model_set_axis, **b_coeff)
super().__init__(n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta)
self._inputs = ("u", "v")
self._outputs = ("x", "y")
def __repr__(self):
return (f"<{self.__class__.__name__}"
f"({[self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]!r})>")
def __str__(self):
parts = [f'Model: {self.__class__.__name__}']
for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]:
parts.append(indent(str(model), width=4))
parts.append('')
return '\n'.join(parts)
@property
def inverse(self):
if (self._ap_order is not None and self._bp_order is not None):
return InverseSIP(self._ap_order, self._bp_order,
self._ap_coeff, self._bp_coeff)
else:
raise NotImplementedError("SIP inverse coefficients are not available.")
def evaluate(self, x, y):
u = self.shift_a.evaluate(x, *self.shift_a.param_sets)
v = self.shift_b.evaluate(y, *self.shift_b.param_sets)
f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets)
g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets)
return f, g
class InverseSIP(Model):
"""
Inverse Simple Imaging Polynomial
Parameters
----------
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
"""
n_inputs = 2
n_outputs = 2
_separable = False
def __init__(self, ap_order, bp_order, ap_coeff={}, bp_coeff={},
n_models=None, model_set_axis=None, name=None, meta=None):
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
# define the 0th term in order to use Polynomial2D
ap_coeff.setdefault('AP_0_0', 0)
bp_coeff.setdefault('BP_0_0', 0)
ap_coeff_params = {k.replace('AP_', 'c'): v for k, v in ap_coeff.items()}
bp_coeff_params = {k.replace('BP_', 'c'): v for k, v in bp_coeff.items()}
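        # Reuse Polynomial2D for the inverse polynomials by renaming the
        # AP_i_j / BP_i_j coefficients to the c{i}_{j} parameter names it expects.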
self.sip1d_ap = Polynomial2D(degree=ap_order,
model_set_axis=model_set_axis,
**ap_coeff_params)
self.sip1d_bp = Polynomial2D(degree=bp_order,
model_set_axis=model_set_axis,
**bp_coeff_params)
super().__init__(n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta)
def __repr__(self):
return f'<{self.__class__.__name__}({[self.sip1d_ap, self.sip1d_bp]!r})>'
def __str__(self):
parts = [f'Model: {self.__class__.__name__}']
for model in [self.sip1d_ap, self.sip1d_bp]:
parts.append(indent(str(model), width=4))
parts.append('')
return '\n'.join(parts)
def evaluate(self, x, y):
x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets)
y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets)
return x1, y1
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tabular models.
Tabular models of any dimension can be created using `tabular_model`.
For convenience `Tabular1D` and `Tabular2D` are provided.
Examples
--------
>>> table = np.array([[ 3., 0., 0.],
... [ 0., 2., 0.],
... [ 0., 0., 0.]])
>>> points = ([1, 2, 3], [1, 2, 3])
>>> t2 = Tabular2D(points, lookup_table=table, bounds_error=False,
... fill_value=None, method='nearest')
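Evaluating the model interpolates from the table (scipy is required; with
``method='nearest'`` and ``fill_value=None`` points outside the grid are
extrapolated):
>>> xinterp = [0, 1, 1.5, 2.72, 3.14]
>>> t2(xinterp, xinterp)  # doctest: +SKIP
array([3., 3., 3., 0., 0.])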
"""
# pylint: disable=invalid-name
import numpy as np
from astropy import units as u
from .core import Model
try:
from scipy.interpolate import interpn
has_scipy = True
except ImportError:
has_scipy = False
__all__ = ['tabular_model', 'Tabular1D', 'Tabular2D']
__doctest_requires__ = {'tabular_model': ['scipy']}
class _Tabular(Model):
"""
Returns an interpolated lookup table value.
Parameters
----------
points : tuple of ndarray of float, optional
The points defining the regular grid in n dimensions.
ndarray must have shapes (m1, ), ..., (mn, ),
lookup_table : array-like
The data on a regular grid in n dimensions.
Must have shapes (m1, ..., mn, ...)
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then ``fill_value`` is used.
fill_value : float or `~astropy.units.Quantity`, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d". If Quantity is given, it will be converted to the unit of
``lookup_table``, if applicable.
Returns
-------
value : ndarray
Interpolated values at input coordinates.
Raises
------
ImportError
Scipy is not installed.
Notes
-----
Uses `scipy.interpolate.interpn`.
"""
linear = False
fittable = False
standard_broadcasting = False
_is_dynamic = True
_id = 0
def __init__(self, points=None, lookup_table=None, method='linear',
bounds_error=True, fill_value=np.nan, **kwargs):
n_models = kwargs.get('n_models', 1)
if n_models > 1:
raise NotImplementedError('Only n_models=1 is supported.')
super().__init__(**kwargs)
self.outputs = ("y",)
if lookup_table is None:
raise ValueError('Must provide a lookup table.')
if not isinstance(lookup_table, u.Quantity):
lookup_table = np.asarray(lookup_table)
if self.lookup_table.ndim != lookup_table.ndim:
raise ValueError("lookup_table should be an array with "
f"{self.lookup_table.ndim} dimensions.")
if points is None:
points = tuple(np.arange(x, dtype=float)
for x in lookup_table.shape)
else:
if lookup_table.ndim == 1 and not isinstance(points, tuple):
points = (points,)
npts = len(points)
if npts != lookup_table.ndim:
raise ValueError(
"Expected grid points in "
f"{lookup_table.ndim} directions, got {npts}.")
if (npts > 1 and isinstance(points[0], u.Quantity) and
len({getattr(p, 'unit', None) for p in points}) > 1):
raise ValueError('points must all have the same unit.')
if isinstance(fill_value, u.Quantity):
if not isinstance(lookup_table, u.Quantity):
raise ValueError(f"fill value is in {fill_value.unit} but expected to be "
"unitless.")
fill_value = fill_value.to(lookup_table.unit).value
self.points = points
self.lookup_table = lookup_table
self.bounds_error = bounds_error
self.method = method
self.fill_value = fill_value
def __repr__(self):
return (f"<{self.__class__.__name__}(points={self.points}, "
f"lookup_table={self.lookup_table})>")
def __str__(self):
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('N_inputs', self.n_inputs),
('N_outputs', self.n_outputs),
('Parameters', ""),
(' points', self.points),
(' lookup_table', self.lookup_table),
(' method', self.method),
(' fill_value', self.fill_value),
(' bounds_error', self.bounds_error)
]
parts = [f'{keyword}: {value}'
for keyword, value in default_keywords
if value is not None]
return '\n'.join(parts)
@property
def input_units(self):
pts = self.points[0]
if not isinstance(pts, u.Quantity):
return None
return {x: pts.unit for x in self.inputs}
@property
def return_units(self):
if not isinstance(self.lookup_table, u.Quantity):
return None
return {self.outputs[0]: self.lookup_table.unit}
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(points_low, points_high)``.
Examples
--------
>>> from astropy.modeling.models import Tabular1D, Tabular2D
>>> t1 = Tabular1D(points=[1, 2, 3], lookup_table=[10, 20, 30])
>>> t1.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=1, upper=3)
}
model=Tabular1D(inputs=('x',))
order='C'
)
>>> t2 = Tabular2D(points=[[1, 2, 3], [2, 3, 4]],
... lookup_table=[[10, 20, 30], [20, 30, 40]])
>>> t2.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=1, upper=3)
y: Interval(lower=2, upper=4)
}
model=Tabular2D(inputs=('x', 'y'))
order='C'
)
"""
bbox = [(min(p), max(p)) for p in self.points][::-1]
if len(bbox) == 1:
bbox = bbox[0]
return bbox
def evaluate(self, *inputs):
"""
Return the interpolated values at the input coordinates.
Parameters
----------
inputs : list of scalar or list of ndarray
Input coordinates. The number of inputs must be equal
to the dimensions of the lookup table.
"""
inputs = np.broadcast_arrays(*inputs)
shape = inputs[0].shape
inputs = [inp.flatten() for inp in inputs[: self.n_inputs]]
inputs = np.array(inputs).T
if not has_scipy: # pragma: no cover
raise ImportError("Tabular model requires scipy.")
result = interpn(self.points, self.lookup_table, inputs,
method=self.method, bounds_error=self.bounds_error,
fill_value=self.fill_value)
# return_units not respected when points has no units
if (isinstance(self.lookup_table, u.Quantity) and
not isinstance(self.points[0], u.Quantity)):
result = result * self.lookup_table.unit
if self.n_outputs == 1:
result = result.reshape(shape)
else:
result = [r.reshape(shape) for r in result]
return result
@property
def inverse(self):
if self.n_inputs == 1:
# If the wavelength array is descending instead of ascending, both
# points and lookup_table need to be reversed in the inverse transform
# for scipy.interpolate to work properly
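            # For example (illustrative values): Tabular1D(points=[1, 2, 3],
            # lookup_table=[10, 20, 30]).inverse(20.) evaluates to 2.0.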
if np.all(np.diff(self.lookup_table) > 0):
# ascending case
points = self.lookup_table
lookup_table = self.points[0]
elif np.all(np.diff(self.lookup_table) < 0):
# descending case, reverse order
points = self.lookup_table[::-1]
lookup_table = self.points[0][::-1]
else:
# equal-valued or double-valued lookup_table
raise NotImplementedError
return Tabular1D(points=points, lookup_table=lookup_table, method=self.method,
bounds_error=self.bounds_error, fill_value=self.fill_value)
raise NotImplementedError("An analytical inverse transform "
"has not been implemented for this model.")
def tabular_model(dim, name=None):
"""
Make a ``Tabular`` model where ``n_inputs`` is
based on the dimension of the lookup_table.
    The returned class must still be instantiated with a lookup table; when
    evaluated it returns the interpolated values.
Parameters
----------
dim : int
Dimensions of the lookup table.
name : str
Name for the class.
Examples
--------
>>> table = np.array([[3., 0., 0.],
... [0., 2., 0.],
... [0., 0., 0.]])
>>> tab = tabular_model(2, name='Tabular2D')
>>> print(tab)
<class 'astropy.modeling.tabular.Tabular2D'>
Name: Tabular2D
N_inputs: 2
N_outputs: 1
>>> points = ([1, 2, 3], [1, 2, 3])
    Setting ``fill_value`` to None allows extrapolation.
>>> m = tab(points, lookup_table=table, name='my_table',
... bounds_error=False, fill_value=None, method='nearest')
>>> xinterp = [0, 1, 1.5, 2.72, 3.14]
>>> m(xinterp, xinterp) # doctest: +FLOAT_CMP
array([3., 3., 3., 0., 0.])
"""
if dim < 1:
raise ValueError('Lookup table must have at least one dimension.')
table = np.zeros([2] * dim)
members = {'lookup_table': table, 'n_inputs': dim, 'n_outputs': 1}
if dim == 1:
members['_separable'] = True
else:
members['_separable'] = False
if name is None:
model_id = _Tabular._id
_Tabular._id += 1
name = f'Tabular{model_id}'
model_class = type(str(name), (_Tabular,), members)
model_class.__module__ = 'astropy.modeling.tabular'
return model_class
Tabular1D = tabular_model(1, name='Tabular1D')
Tabular2D = tabular_model(2, name='Tabular2D')
_tab_docs = """
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then ``fill_value`` is used.
fill_value : float, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
value : ndarray
Interpolated values at input coordinates.
Raises
------
ImportError
Scipy is not installed.
Notes
-----
Uses `scipy.interpolate.interpn`.
"""
Tabular1D.__doc__ = """
Tabular model in 1D.
Returns an interpolated lookup table value.
Parameters
----------
    points : array-like of float, of ndim=1
        The points defining the regular grid in one dimension.
    lookup_table : array-like, of ndim=1
        The data in one dimension.
""" + _tab_docs
Tabular2D.__doc__ = """
Tabular model in 2D.
Returns an interpolated lookup table value.
Parameters
----------
points : tuple of ndarray of float, optional
        The points defining the regular grid in two dimensions.
        A tuple of two 1-D ndarrays with shapes (m1,) and (m2,).
lookup_table : array-like
The data on a regular grid in 2 dimensions.
Shape (m1, m2).
""" + _tab_docs
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import select
import socket
import threading
import time
import uuid
import warnings
import queue
import xmlrpc.client as xmlrpc
from urllib.parse import urlunparse
from astropy import log
from .constants import SAMP_STATUS_OK
from .constants import __profile_version__
from .errors import SAMPWarning, SAMPHubError, SAMPProxyError
from .utils import internet_on, ServerProxyPool, _HubAsClient
from .lockfile_helpers import read_lockfile, create_lock_file
from .standard_profile import ThreadingXMLRPCServer
from .web_profile import WebProfileXMLRPCServer, web_profile_text_dialog
__all__ = ['SAMPHubServer', 'WebProfileDialog']
__doctest_skip__ = ['.', 'SAMPHubServer.*']
class SAMPHubServer:
"""
SAMP Hub Server.
Parameters
----------
secret : str, optional
        The secret code to use for the SAMP lockfile. If none is specified,
the :func:`uuid.uuid1` function is used to generate one.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
lockfile : str, optional
Custom lockfile name.
timeout : int, optional
Hub inactivity timeout. If ``timeout > 0`` then the Hub automatically
stops after an inactivity period longer than ``timeout`` seconds. By
default ``timeout`` is set to 0 (Hub never expires).
client_timeout : int, optional
Client inactivity timeout. If ``client_timeout > 0`` then the Hub
        automatically unregisters clients that have been inactive for a
period longer than ``client_timeout`` seconds. By default
``client_timeout`` is set to 0 (clients never expire).
mode : str, optional
Defines the Hub running mode. If ``mode`` is ``'single'`` then the Hub
        runs using the standard ``.samp`` lock-file, with a single instance
        per user desktop session. Otherwise, if ``mode`` is ``'multiple'``,
        then the Hub runs using a non-standard lock-file, placed in the
        ``.samp-1`` directory, of the form ``samp-hub-<UUID>``, where
``<UUID>`` is a unique UUID assigned to the hub.
label : str, optional
        A string used to label the Hub with a human-readable name. This string
is written in the lock-file assigned to the ``hub.label`` token.
web_profile : bool, optional
Enables or disables the Web Profile support.
web_profile_dialog : class, optional
Allows a class instance to be specified using ``web_profile_dialog``
to replace the terminal-based message with e.g. a GUI pop-up. Two
`queue.Queue` instances will be added to the instance as attributes
``queue_request`` and ``queue_result``. When a request is received via
the ``queue_request`` queue, the pop-up should be displayed, and a
value of `True` or `False` should be added to ``queue_result``
depending on whether the user accepted or refused the connection.
web_port : int, optional
The port to use for web SAMP. This should not be changed except for
testing purposes, since web SAMP should always use port 21012.
pool_size : int, optional
The number of socket connections opened to communicate with the
clients.
"""
def __init__(self, secret=None, addr=None, port=0, lockfile=None,
timeout=0, client_timeout=0, mode='single', label="",
web_profile=True, web_profile_dialog=None, web_port=21012,
pool_size=20):
# Generate random ID for the hub
self._id = str(uuid.uuid1())
# General settings
self._is_running = False
self._customlockfilename = lockfile
self._lockfile = None
self._addr = addr
self._port = port
self._mode = mode
self._label = label
self._timeout = timeout
self._client_timeout = client_timeout
self._pool_size = pool_size
# Web profile specific attributes
self._web_profile = web_profile
self._web_profile_dialog = web_profile_dialog
self._web_port = web_port
self._web_profile_server = None
self._web_profile_callbacks = {}
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name,
self._port or 0)
except OSError:
self._host_name = "127.0.0.1"
# Threading stuff
self._thread_lock = threading.Lock()
self._thread_run = None
self._thread_hub_timeout = None
self._thread_client_timeout = None
self._launched_threads = []
# Variables for timeout testing:
self._last_activity_time = None
self._client_activity_time = {}
# Hub message id counter, used to create hub msg ids
self._hub_msg_id_counter = 0
# Hub secret code
self._hub_secret_code_customized = secret
self._hub_secret = self._create_secret_code()
# Hub public id (as SAMP client)
self._hub_public_id = ""
# Client ids
# {private_key: (public_id, timestamp)}
self._private_keys = {}
# Metadata per client
# {private_key: metadata}
self._metadata = {}
# List of subscribed clients per MType
# {mtype: private_key list}
self._mtype2ids = {}
# List of subscribed MTypes per client
# {private_key: mtype list}
self._id2mtypes = {}
# List of XML-RPC addresses per client
# {public_id: (XML-RPC address, ServerProxyPool instance)}
self._xmlrpc_endpoints = {}
# Synchronous message id heap
self._sync_msg_ids_heap = {}
# Public ids counter
self._client_id_counter = -1
@property
def id(self):
"""
The unique hub ID.
"""
return self._id
def _register_standard_api(self, server):
# Standard Profile only operations
server.register_function(self._ping, 'samp.hub.ping')
server.register_function(self._set_xmlrpc_callback, 'samp.hub.setXmlrpcCallback')
# Standard API operations
server.register_function(self._register, 'samp.hub.register')
server.register_function(self._unregister, 'samp.hub.unregister')
server.register_function(self._declare_metadata, 'samp.hub.declareMetadata')
server.register_function(self._get_metadata, 'samp.hub.getMetadata')
server.register_function(self._declare_subscriptions, 'samp.hub.declareSubscriptions')
server.register_function(self._get_subscriptions, 'samp.hub.getSubscriptions')
server.register_function(self._get_registered_clients, 'samp.hub.getRegisteredClients')
server.register_function(self._get_subscribed_clients, 'samp.hub.getSubscribedClients')
server.register_function(self._notify, 'samp.hub.notify')
server.register_function(self._notify_all, 'samp.hub.notifyAll')
server.register_function(self._call, 'samp.hub.call')
server.register_function(self._call_all, 'samp.hub.callAll')
server.register_function(self._call_and_wait, 'samp.hub.callAndWait')
server.register_function(self._reply, 'samp.hub.reply')
def _register_web_profile_api(self, server):
# Web Profile methods like Standard Profile
server.register_function(self._ping, 'samp.webhub.ping')
server.register_function(self._unregister, 'samp.webhub.unregister')
server.register_function(self._declare_metadata, 'samp.webhub.declareMetadata')
server.register_function(self._get_metadata, 'samp.webhub.getMetadata')
server.register_function(self._declare_subscriptions, 'samp.webhub.declareSubscriptions')
server.register_function(self._get_subscriptions, 'samp.webhub.getSubscriptions')
server.register_function(self._get_registered_clients, 'samp.webhub.getRegisteredClients')
server.register_function(self._get_subscribed_clients, 'samp.webhub.getSubscribedClients')
server.register_function(self._notify, 'samp.webhub.notify')
server.register_function(self._notify_all, 'samp.webhub.notifyAll')
server.register_function(self._call, 'samp.webhub.call')
server.register_function(self._call_all, 'samp.webhub.callAll')
server.register_function(self._call_and_wait, 'samp.webhub.callAndWait')
server.register_function(self._reply, 'samp.webhub.reply')
# Methods particularly for Web Profile
server.register_function(self._web_profile_register, 'samp.webhub.register')
server.register_function(self._web_profile_allowReverseCallbacks, 'samp.webhub.allowReverseCallbacks')
server.register_function(self._web_profile_pullCallbacks, 'samp.webhub.pullCallbacks')
def _start_standard_server(self):
self._server = ThreadingXMLRPCServer(
(self._addr or self._host_name, self._port or 0),
log, logRequests=False, allow_none=True)
prot = 'http'
self._port = self._server.socket.getsockname()[1]
addr = f"{self._addr or self._host_name}:{self._port}"
self._url = urlunparse((prot, addr, '', '', '', ''))
self._server.register_introspection_functions()
self._register_standard_api(self._server)
def _start_web_profile_server(self):
self._web_profile_requests_queue = queue.Queue(1)
self._web_profile_requests_result = queue.Queue(1)
self._web_profile_requests_semaphore = queue.Queue(1)
if self._web_profile_dialog is not None:
# TODO: Some sort of duck-typing on the web_profile_dialog object
self._web_profile_dialog.queue_request = \
self._web_profile_requests_queue
self._web_profile_dialog.queue_result = \
self._web_profile_requests_result
try:
self._web_profile_server = WebProfileXMLRPCServer(
('localhost', self._web_port), log, logRequests=False,
allow_none=True)
self._web_port = self._web_profile_server.socket.getsockname()[1]
self._web_profile_server.register_introspection_functions()
self._register_web_profile_api(self._web_profile_server)
log.info("Hub set to run with Web Profile support enabled.")
except OSError:
            log.warning("Port {} already in use. Impossible to run the "
                        "Hub with Web Profile support.".format(self._web_port))
self._web_profile = False
# Cleanup
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
def _launch_thread(self, group=None, target=None, name=None, args=None):
# Remove inactive threads
remove = []
for t in self._launched_threads:
if not t.is_alive():
remove.append(t)
for t in remove:
self._launched_threads.remove(t)
# Start new thread
t = threading.Thread(group=group, target=target, name=name, args=args)
t.start()
# Add to list of launched threads
self._launched_threads.append(t)
def _join_launched_threads(self, timeout=None):
for t in self._launched_threads:
t.join(timeout=timeout)
def _timeout_test_hub(self):
if self._timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.:
with self._thread_lock:
if self._last_activity_time is not None:
if now - self._last_activity_time >= self._timeout:
warnings.warn("Timeout expired, Hub is shutting down!",
SAMPWarning)
self.stop()
return
last = now
def _timeout_test_client(self):
if self._client_timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.:
                # Iterate over a snapshot: _unregister() below removes entries
                # from self._client_activity_time while we loop.
                for private_key in list(self._client_activity_time):
if (now - self._client_activity_time[private_key] > self._client_timeout
and private_key != self._hub_private_key):
warnings.warn(
f"Client {private_key} timeout expired!",
SAMPWarning)
self._notify_disconnection(private_key)
self._unregister(private_key)
last = now
def _hub_as_client_request_handler(self, method, args):
if method == 'samp.client.receiveCall':
return self._receive_call(*args)
elif method == 'samp.client.receiveNotification':
return self._receive_notification(*args)
elif method == 'samp.client.receiveResponse':
return self._receive_response(*args)
elif method == 'samp.app.ping':
return self._ping(*args)
def _setup_hub_as_client(self):
hub_metadata = {"samp.name": "Astropy SAMP Hub",
"samp.description.text": self._label,
"author.name": "The Astropy Collaboration",
"samp.documentation.url": "https://docs.astropy.org/en/stable/samp",
"samp.icon.url": self._url + "/samp/icon"}
result = self._register(self._hub_secret)
self._hub_public_id = result["samp.self-id"]
self._hub_private_key = result["samp.private-key"]
self._set_xmlrpc_callback(self._hub_private_key, self._url)
self._declare_metadata(self._hub_private_key, hub_metadata)
self._declare_subscriptions(self._hub_private_key,
{"samp.app.ping": {},
"x-samp.query.by-meta": {}})
def start(self, wait=False):
"""
Start the current SAMP Hub instance and create the lock file. Hub
start-up can be blocking or non blocking depending on the ``wait``
parameter.
Parameters
----------
wait : bool
If `True` then the Hub process is joined with the caller, blocking
            the code flow. Usually the `True` option is used to run a stand-alone
Hub in an executable script. If `False` (default), then the Hub
process runs in a separated thread. `False` is usually used in a
Python shell.
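        Examples
        --------
        A minimal sketch (the Web Profile is disabled only to keep the example
        self-contained):
        >>> from astropy.samp import SAMPHubServer
        >>> hub = SAMPHubServer(web_profile=False)
        >>> hub.start()   # non-blocking; the Hub runs in a background thread
        >>> hub.is_running
        True
        >>> hub.stop()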
"""
if self._is_running:
raise SAMPHubError("Hub is already running")
if self._lockfile is not None:
raise SAMPHubError("Hub is not running but lockfile is set")
if self._web_profile:
self._start_web_profile_server()
self._start_standard_server()
self._lockfile = create_lock_file(lockfilename=self._customlockfilename,
mode=self._mode, hub_id=self.id,
hub_params=self.params)
self._update_last_activity_time()
self._setup_hub_as_client()
self._start_threads()
log.info("Hub started")
if wait and self._is_running:
self._thread_run.join()
self._thread_run = None
@property
def params(self):
"""
        The hub parameters (which are written to the lock file)
"""
params = {}
# Keys required by standard profile
params['samp.secret'] = self._hub_secret
params['samp.hub.xmlrpc.url'] = self._url
params['samp.profile.version'] = __profile_version__
# Custom keys
params['hub.id'] = self.id
params['hub.label'] = self._label or f"Hub {self.id}"
return params
def _start_threads(self):
self._thread_run = threading.Thread(target=self._serve_forever)
self._thread_run.daemon = True
if self._timeout > 0:
self._thread_hub_timeout = threading.Thread(
target=self._timeout_test_hub,
name="Hub timeout test")
self._thread_hub_timeout.daemon = True
else:
self._thread_hub_timeout = None
if self._client_timeout > 0:
self._thread_client_timeout = threading.Thread(
target=self._timeout_test_client,
name="Client timeout test")
self._thread_client_timeout.daemon = True
else:
self._thread_client_timeout = None
self._is_running = True
self._thread_run.start()
if self._thread_hub_timeout is not None:
self._thread_hub_timeout.start()
if self._thread_client_timeout is not None:
self._thread_client_timeout.start()
def _create_secret_code(self):
if self._hub_secret_code_customized is not None:
return self._hub_secret_code_customized
else:
return str(uuid.uuid1())
def stop(self):
"""
Stop the current SAMP Hub instance and delete the lock file.
"""
if not self._is_running:
return
log.info("Hub is stopping...")
self._notify_shutdown()
self._is_running = False
if self._lockfile and os.path.isfile(self._lockfile):
lockfiledict = read_lockfile(self._lockfile)
if lockfiledict['samp.secret'] == self._hub_secret:
os.remove(self._lockfile)
self._lockfile = None
# Reset variables
# TODO: What happens if not all threads are stopped after timeout?
self._join_all_threads(timeout=10.)
self._hub_msg_id_counter = 0
self._hub_secret = self._create_secret_code()
self._hub_public_id = ""
self._metadata = {}
self._private_keys = {}
self._mtype2ids = {}
self._id2mtypes = {}
self._xmlrpc_endpoints = {}
self._last_activity_time = None
log.info("Hub stopped.")
def _join_all_threads(self, timeout=None):
# In some cases, ``stop`` may be called from some of the sub-threads,
# so we just need to make sure that we don't try and shut down the
# calling thread.
current_thread = threading.current_thread()
if self._thread_run is not current_thread:
self._thread_run.join(timeout=timeout)
if not self._thread_run.is_alive():
self._thread_run = None
if self._thread_hub_timeout is not None and self._thread_hub_timeout is not current_thread:
self._thread_hub_timeout.join(timeout=timeout)
if not self._thread_hub_timeout.is_alive():
self._thread_hub_timeout = None
if self._thread_client_timeout is not None and self._thread_client_timeout is not current_thread:
self._thread_client_timeout.join(timeout=timeout)
if not self._thread_client_timeout.is_alive():
self._thread_client_timeout = None
self._join_launched_threads(timeout=timeout)
@property
def is_running(self):
"""Return an information concerning the Hub running status.
Returns
-------
running : bool
Is the hub running?
"""
return self._is_running
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self._server.socket], [], [], 0.01)[0]
except OSError as exc:
warnings.warn(f"Call to select() in SAMPHubServer failed: {exc}",
SAMPWarning)
else:
if read_ready:
self._server.handle_request()
if self._web_profile:
# We now check if there are any connection requests from the
# web profile, and if so, we initialize the pop-up.
if self._web_profile_dialog is None:
try:
request = self._web_profile_requests_queue.get_nowait()
except queue.Empty:
pass
else:
web_profile_text_dialog(request, self._web_profile_requests_result)
# We now check for requests over the web profile socket, and we
# also update the pop-up in case there are any changes.
try:
read_ready = select.select([self._web_profile_server.socket], [], [], 0.01)[0]
except OSError as exc:
warnings.warn(f"Call to select() in SAMPHubServer failed: {exc}",
SAMPWarning)
else:
if read_ready:
self._web_profile_server.handle_request()
self._server.server_close()
if self._web_profile_server is not None:
self._web_profile_server.server_close()
def _notify_shutdown(self):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown")
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
self._notify_(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.shutdown",
"samp.params": {}})
def _notify_register(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.register",
"samp.params": {"id": public_id}})
def _notify_unregister(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.unregister",
"samp.params": {"id": public_id}})
def _notify_metadata(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.metadata",
"samp.params": {"id": public_id,
"metadata": self._metadata[private_key]}
})
def _notify_subscriptions(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.subscriptions",
"samp.params": {"id": public_id,
"subscriptions": self._id2mtypes[private_key]}
})
def _notify_disconnection(self, private_key):
def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message):
endpoint.samp.client.receiveNotification(private_key, hub_public_id, message)
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect")
public_id = self._private_keys[private_key][0]
endpoint = self._xmlrpc_endpoints[public_id][1]
for mtype in msubs:
if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]:
log.debug(f"notify disconnection to {public_id}")
self._launch_thread(target=_xmlrpc_call_disconnect,
args=(endpoint, private_key,
self._hub_public_id,
{"samp.mtype": "samp.hub.disconnect",
"samp.params": {"reason": "Timeout expired!"}}))
def _ping(self):
self._update_last_activity_time()
log.debug("ping")
return "1"
def _query_by_metadata(self, key, value):
public_id_list = []
for private_id in self._metadata:
if key in self._metadata[private_id]:
if self._metadata[private_id][key] == value:
public_id_list.append(self._private_keys[private_id][0])
return public_id_list
def _set_xmlrpc_callback(self, private_key, xmlrpc_addr):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if private_key == self._hub_private_key:
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = \
(xmlrpc_addr, _HubAsClient(self._hub_as_client_request_handler))
return ""
# Dictionary stored with the public id
log.debug(f"set_xmlrpc_callback: {private_key} {xmlrpc_addr}")
            server_proxy_pool = ServerProxyPool(self._pool_size,
xmlrpc.ServerProxy,
xmlrpc_addr, allow_none=1)
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = (xmlrpc_addr,
server_proxy_pool)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _perform_standard_register(self):
with self._thread_lock:
private_key, public_id = self._get_new_ids()
self._private_keys[private_key] = (public_id, time.time())
self._update_last_activity_time(private_key)
self._notify_register(private_key)
log.debug(f"register: private-key = {private_key} and self-id = {public_id}")
return {"samp.self-id": public_id,
"samp.private-key": private_key,
"samp.hub-id": self._hub_public_id}
def _register(self, secret):
self._update_last_activity_time()
if secret == self._hub_secret:
return self._perform_standard_register()
else:
# return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""}
raise SAMPProxyError(7, "Bad secret code")
def _get_new_ids(self):
private_key = str(uuid.uuid1())
self._client_id_counter += 1
public_id = 'cli#hub'
if self._client_id_counter > 0:
public_id = f"cli#{self._client_id_counter}"
return private_key, public_id
def _unregister(self, private_key):
self._update_last_activity_time()
public_key = ""
self._notify_unregister(private_key)
with self._thread_lock:
if private_key in self._private_keys:
public_key = self._private_keys[private_key][0]
del self._private_keys[private_key]
else:
return ""
if private_key in self._metadata:
del self._metadata[private_key]
if private_key in self._id2mtypes:
del self._id2mtypes[private_key]
for mtype in self._mtype2ids.keys():
if private_key in self._mtype2ids[mtype]:
self._mtype2ids[mtype].remove(private_key)
if public_key in self._xmlrpc_endpoints:
del self._xmlrpc_endpoints[public_key]
if private_key in self._client_activity_time:
del self._client_activity_time[private_key]
if self._web_profile:
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
self._web_profile_server.remove_client(private_key)
log.debug(f"unregister {public_key} ({private_key})")
return ""
def _declare_metadata(self, private_key, metadata):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug("declare_metadata: private-key = {} metadata = {}"
.format(private_key, str(metadata)))
self._metadata[private_key] = metadata
self._notify_metadata(private_key)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _get_metadata(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
log.debug("get_metadata: private-key = {} client-id = {}"
.format(private_key, client_id))
if client_private_key is not None:
if client_private_key in self._metadata:
log.debug(f"--> metadata = {self._metadata[client_private_key]}")
return self._metadata[client_private_key]
else:
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _declare_subscriptions(self, private_key, mtypes):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug("declare_subscriptions: private-key = {} mtypes = {}"
.format(private_key, str(mtypes)))
# remove subscription to previous mtypes
if private_key in self._id2mtypes:
prev_mtypes = self._id2mtypes[private_key]
for mtype in prev_mtypes:
try:
self._mtype2ids[mtype].remove(private_key)
except ValueError: # private_key is not in list
pass
self._id2mtypes[private_key] = copy.deepcopy(mtypes)
# remove duplicated MType for wildcard overwriting
original_mtypes = copy.deepcopy(mtypes)
for mtype in original_mtypes:
if mtype.endswith("*"):
for mtype2 in original_mtypes:
if mtype2.startswith(mtype[:-1]) and \
mtype2 != mtype:
if mtype2 in mtypes:
del(mtypes[mtype2])
log.debug("declare_subscriptions: subscriptions accepted from "
"{} => {}".format(private_key, str(mtypes)))
for mtype in mtypes:
if mtype in self._mtype2ids:
if private_key not in self._mtype2ids[mtype]:
self._mtype2ids[mtype].append(private_key)
else:
self._mtype2ids[mtype] = [private_key]
self._notify_subscriptions(private_key)
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _get_subscriptions(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
if client_private_key is not None:
if client_private_key in self._id2mtypes:
log.debug("get_subscriptions: client-id = {} mtypes = {}"
.format(client_id,
str(self._id2mtypes[client_private_key])))
return self._id2mtypes[client_private_key]
else:
log.debug("get_subscriptions: client-id = {} mtypes = "
"missing".format(client_id))
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _get_registered_clients(self, private_key):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
reg_clients = []
for pkey in self._private_keys.keys():
if pkey != private_key:
reg_clients.append(self._private_keys[pkey][0])
log.debug("get_registered_clients: private_key = {} clients = {}"
.format(private_key, reg_clients))
return reg_clients
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _get_subscribed_clients(self, private_key, mtype):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
sub_clients = {}
for pkey in self._private_keys.keys():
if pkey != private_key and self._is_subscribed(pkey, mtype):
sub_clients[self._private_keys[pkey][0]] = {}
log.debug("get_subscribed_clients: private_key = {} mtype = {} "
"clients = {}".format(private_key, mtype, sub_clients))
return sub_clients
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
@staticmethod
def get_mtype_subtypes(mtype):
"""
Return a list containing all the possible wildcarded subtypes of MType.
Parameters
----------
mtype : str
MType to be parsed.
Returns
-------
types : list
List of subtypes
Examples
--------
>>> from astropy.samp import SAMPHubServer
>>> SAMPHubServer.get_mtype_subtypes("samp.app.ping")
['samp.app.ping', 'samp.app.*', 'samp.*', '*']
"""
subtypes = []
msubs = mtype.split(".")
indexes = list(range(len(msubs)))
indexes.reverse()
indexes.append(-1)
for i in indexes:
tmp_mtype = ".".join(msubs[:i + 1])
if tmp_mtype != mtype:
if tmp_mtype != "":
tmp_mtype = tmp_mtype + ".*"
else:
tmp_mtype = "*"
subtypes.append(tmp_mtype)
return subtypes
def _is_subscribed(self, private_key, mtype):
subscribed = False
msubs = SAMPHubServer.get_mtype_subtypes(mtype)
for msub in msubs:
if msub in self._mtype2ids:
if private_key in self._mtype2ids[msub]:
subscribed = True
return subscribed
def _notify(self, private_key, recipient_id, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if self._is_subscribed(self._public_id_to_private_key(recipient_id),
message["samp.mtype"]) is False:
raise SAMPProxyError(2, "Client {} not subscribed to MType {}"
.format(recipient_id, message["samp.mtype"]))
self._launch_thread(target=self._notify_, args=(private_key,
recipient_id,
message))
return {}
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _notify_(self, sender_private_key, recipient_public_id, message):
if sender_private_key not in self._private_keys:
return
sender_public_id = self._private_keys[sender_private_key][0]
try:
log.debug("notify {} from {} to {}".format(
message["samp.mtype"], sender_public_id,
recipient_public_id))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, message)
samp_method_name = "receiveNotification"
self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} notification from client {} to client {} "
"failed [{}]".format(message["samp.mtype"],
sender_public_id,
recipient_public_id, exc),
SAMPWarning)
def _notify_all(self, private_key, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing")
recipient_ids = self._notify_all_(private_key, message)
return recipient_ids
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _notify_all_(self, sender_private_key, message):
recipient_ids = []
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_recipient_id = self._private_keys[key][0]
recipient_ids.append(_recipient_id)
self._launch_thread(target=self._notify,
args=(sender_private_key,
_recipient_id, message)
)
return recipient_ids
def _call(self, private_key, recipient_id, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if self._is_subscribed(self._public_id_to_private_key(recipient_id),
message["samp.mtype"]) is False:
raise SAMPProxyError(2, "Client {} not subscribed to MType {}"
.format(recipient_id, message["samp.mtype"]))
public_id = self._private_keys[private_key][0]
msg_id = self._get_new_hub_msg_id(public_id, msg_tag)
self._launch_thread(target=self._call_, args=(private_key, public_id,
recipient_id, msg_id,
message))
return msg_id
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _call_(self, sender_private_key, sender_public_id,
recipient_public_id, msg_id, message):
if sender_private_key not in self._private_keys:
return
try:
log.debug("call {} from {} to {} ({})".format(
msg_id.split(";;")[0], sender_public_id,
recipient_public_id, message["samp.mtype"]))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, msg_id, message)
            samp_method_name = "receiveCall"
            self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} call {} from client {} to client {} failed "
"[{},{}]".format(message["samp.mtype"],
msg_id.split(";;")[0],
sender_public_id,
recipient_public_id, type(exc), exc),
SAMPWarning)
def _call_all(self, private_key, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing in "
"message tagged as {}".format(msg_tag))
public_id = self._private_keys[private_key][0]
msg_id = self._call_all_(private_key, public_id, msg_tag, message)
return msg_id
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _call_all_(self, sender_private_key, sender_public_id, msg_tag,
message):
msg_id = {}
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_msg_id = self._get_new_hub_msg_id(sender_public_id,
msg_tag)
receiver_public_id = self._private_keys[key][0]
msg_id[receiver_public_id] = _msg_id
self._launch_thread(target=self._call_,
args=(sender_private_key,
sender_public_id,
receiver_public_id, _msg_id,
message))
return msg_id
def _call_and_wait(self, private_key, recipient_id, message, timeout):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
timeout = int(timeout)
now = time.time()
response = {}
msg_id = self._call(private_key, recipient_id, "samp::sync::call",
message)
self._sync_msg_ids_heap[msg_id] = None
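            # The heap entry stays None until _reply_ stores the response for
            # a message tagged "samp::sync::call"; the loop below polls for
            # that value or for the timeout.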
while self._is_running:
if 0 < timeout <= time.time() - now:
                    del self._sync_msg_ids_heap[msg_id]
raise SAMPProxyError(1, "Timeout expired!")
if self._sync_msg_ids_heap[msg_id] is not None:
response = copy.deepcopy(self._sync_msg_ids_heap[msg_id])
                    del self._sync_msg_ids_heap[msg_id]
break
time.sleep(0.01)
return response
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
def _reply(self, private_key, msg_id, response):
"""
The main method that gets called for replying. This starts up an
asynchronous reply thread and returns.
"""
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
self._launch_thread(target=self._reply_, args=(private_key, msg_id,
response))
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return {}
def _reply_(self, responder_private_key, msg_id, response):
if responder_private_key not in self._private_keys or not msg_id:
return
responder_public_id = self._private_keys[responder_private_key][0]
counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split(";;", 3)
try:
log.debug("reply {} from {} to {}".format(
counter, responder_public_id, recipient_public_id))
if recipient_msg_tag == "samp::sync::call":
if msg_id in self._sync_msg_ids_heap.keys():
self._sync_msg_ids_heap[msg_id] = response
else:
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (responder_public_id, recipient_msg_tag, response)
samp_method_name = "receiveResponse"
self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} reply from client {} to client {} failed [{}]"
.format(recipient_msg_tag, responder_public_id,
recipient_public_id, exc),
SAMPWarning)
def _retry_method(self, recipient_private_key, recipient_public_id, samp_method_name, arg_params):
"""
This method is used to retry a SAMP call several times.
Parameters
----------
        recipient_private_key
            The private key of the receiver of the call
        recipient_public_id
            The public ID of the receiver of the call
samp_method_name : str
The name of the SAMP method to call
arg_params : tuple
Any additional arguments to be passed to the SAMP method
"""
if recipient_private_key is None:
raise SAMPHubError("Invalid client ID")
from . import conf
for attempt in range(conf.n_retries):
if not self._is_running:
time.sleep(0.01)
continue
try:
if (self._web_profile and
recipient_private_key in self._web_profile_callbacks):
# Web Profile
callback = {"samp.methodName": samp_method_name,
"samp.params": arg_params}
self._web_profile_callbacks[recipient_private_key].put(callback)
else:
# Standard Profile
hub = self._xmlrpc_endpoints[recipient_public_id][1]
getattr(hub.samp.client, samp_method_name)(recipient_private_key, *arg_params)
except xmlrpc.Fault as exc:
log.debug("{} XML-RPC endpoint error (attempt {}): {}"
.format(recipient_public_id, attempt + 1,
exc.faultString))
time.sleep(0.01)
else:
return
# If we are here, then the above attempts failed
error_message = samp_method_name + " failed after " + str(conf.n_retries) + " attempts"
raise SAMPHubError(error_message)
def _public_id_to_private_key(self, public_id):
for private_key in self._private_keys.keys():
if self._private_keys[private_key][0] == public_id:
return private_key
return None
def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id):
with self._thread_lock:
self._hub_msg_id_counter += 1
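        # Hub message-ids have the form
        # "msg#<counter>;;<hub-id>;;<sender-public-id>;;<sender-msg-tag>";
        # _reply_ later splits on ";;" to recover the original message tag.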
return "msg#{};;{};;{};;{}".format(self._hub_msg_id_counter,
self._hub_public_id,
sender_public_id, sender_msg_id)
def _update_last_activity_time(self, private_key=None):
with self._thread_lock:
self._last_activity_time = time.time()
if private_key is not None:
self._client_activity_time[private_key] = time.time()
def _receive_notification(self, private_key, sender_id, message):
return ""
def _receive_call(self, private_key, sender_id, msg_id, message):
if private_key == self._hub_private_key:
if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping":
self._reply(self._hub_private_key, msg_id,
{"samp.status": SAMP_STATUS_OK, "samp.result": {}})
elif ("samp.mtype" in message and
(message["samp.mtype"] == "x-samp.query.by-meta" or
message["samp.mtype"] == "samp.query.by-meta")):
ids_list = self._query_by_metadata(message["samp.params"]["key"],
message["samp.params"]["value"])
self._reply(self._hub_private_key, msg_id,
{"samp.status": SAMP_STATUS_OK,
"samp.result": {"ids": ids_list}})
return ""
else:
return ""
def _receive_response(self, private_key, responder_id, msg_tag, response):
return ""
def _web_profile_register(self, identity_info,
client_address=("unknown", 0),
origin="unknown"):
self._update_last_activity_time()
        if client_address[0] not in ["localhost", "127.0.0.1"]:
raise SAMPProxyError(403, "Request of registration rejected "
"by the Hub.")
if not origin:
origin = "unknown"
        # Older versions of the protocol provided just a string with the
        # application name instead of a metadata dictionary, so only check
        # for the required keys when a dictionary is given.
        if isinstance(identity_info, dict):
            if "samp.name" not in identity_info:
raise SAMPProxyError(403, "Request of registration rejected "
"by the Hub (application name not "
"provided).")
# Red semaphore for the other threads
self._web_profile_requests_semaphore.put("wait")
# Set the request to be displayed for the current thread
self._web_profile_requests_queue.put((identity_info, client_address,
origin))
# Get the popup dialogue response
response = self._web_profile_requests_result.get()
# OK, semaphore green
self._web_profile_requests_semaphore.get()
if response:
register_map = self._perform_standard_register()
translator_url = ("http://localhost:{}/translator/{}?ref="
.format(self._web_port, register_map["samp.private-key"]))
register_map["samp.url-translator"] = translator_url
self._web_profile_server.add_client(register_map["samp.private-key"])
return register_map
else:
raise SAMPProxyError(403, "Request of registration rejected by "
"the user.")
def _web_profile_allowReverseCallbacks(self, private_key, allow):
self._update_last_activity_time()
if private_key in self._private_keys:
if allow == "0":
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
else:
self._web_profile_callbacks[private_key] = queue.Queue()
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
return ""
def _web_profile_pullCallbacks(self, private_key, timeout_secs):
self._update_last_activity_time()
if private_key in self._private_keys:
callback = []
callback_queue = self._web_profile_callbacks[private_key]
try:
while self._is_running:
item_queued = callback_queue.get_nowait()
callback.append(item_queued)
except queue.Empty:
pass
return callback
else:
raise SAMPProxyError(5, f"Private-key {private_key} expired or invalid.")
class WebProfileDialog:
"""
A base class to make writing Web Profile GUI consent dialogs
easier.
The concrete class must:
1) Poll ``handle_queue`` periodically, using the timer services
of the GUI's event loop. This function will call
``self.show_dialog`` when a request requires authorization.
``self.show_dialog`` will be given the arguments:
- ``samp_name``: The name of the application making the request.
- ``details``: A dictionary of details about the client
making the request.
- ``client``: A hostname, port pair containing the client
address.
- ``origin``: A string containing the origin of the
request.
2) Call ``consent`` or ``reject`` based on the user's response to
the dialog.
"""
def handle_queue(self):
try:
request = self.queue_request.get_nowait()
except queue.Empty: # queue is set but empty
pass
except AttributeError: # queue has not been set yet
pass
else:
if isinstance(request[0], str): # To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
self.show_dialog(samp_name, request[0], request[1], request[2])
def consent(self):
self.queue_result.put(True)
def reject(self):
self.queue_result.put(False)
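# --- Illustrative sketch (not part of astropy.samp) --------------------------
# A minimal console-based consent dialog built on WebProfileDialog, following
# the contract described in its docstring. It assumes the hub has attached the
# ``queue_request``/``queue_result`` queues to the instance and that
# ``handle_queue`` is polled periodically; the class name is hypothetical.
class ConsoleProfileDialog(WebProfileDialog):
    def show_dialog(self, samp_name, details, client, origin):
        # Ask on stdin instead of popping up a GUI window.
        answer = input("Allow Web Profile client '{}' from {} (origin {})? "
                       "[y/N] ".format(samp_name, client, origin))
        if answer.strip().lower() == "y":
            self.consent()
        else:
            self.reject()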
|
02fd21dbaac256d48118bf83409dd3514902d3456f33074cd6f62c3853a461ad | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: this file should be refactored to use a more thread-safe and
# race-condition-safe lockfile mechanism.
import datetime
import os
import socket
import stat
import warnings
from contextlib import suppress
from urllib.parse import urlparse
import xmlrpc.client as xmlrpc
from astropy.config.paths import _find_home
from astropy import log
from astropy.utils.data import get_readable_fileobj
from .errors import SAMPHubError, SAMPWarning
def read_lockfile(lockfilename):
"""
Read in the lockfile given by ``lockfilename`` into a dictionary.
"""
# lockfilename may be a local file or a remote URL, but
# get_readable_fileobj takes care of this.
lockfiledict = {}
with get_readable_fileobj(lockfilename) as f:
for line in f:
if not line.startswith("#"):
kw, val = line.split("=")
lockfiledict[kw.strip()] = val.strip()
return lockfiledict
def write_lockfile(lockfilename, lockfiledict):
    # Create the file and restrict it to owner read/write before the content
    # (which includes the samp.secret token) is written to it.
    lockfile = open(lockfilename, "w")
lockfile.close()
os.chmod(lockfilename, stat.S_IREAD + stat.S_IWRITE)
lockfile = open(lockfilename, "w")
now_iso = datetime.datetime.now().isoformat()
lockfile.write(f"# SAMP lockfile written on {now_iso}\n")
lockfile.write("# Standard Profile required keys\n")
for key, value in lockfiledict.items():
lockfile.write(f"{key}={value}\n")
lockfile.close()
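# --- Illustrative sketch (not part of astropy.samp) --------------------------
# The lockfile is a plain "key=value" file with "#" comment lines, so a round
# trip through write_lockfile/read_lockfile preserves the tokens. The token
# values and the function name below are hypothetical.
def _demo_lockfile_roundtrip():
    import tempfile
    tokens = {"samp.secret": "0123456789abcdef",
              "samp.hub.xmlrpc.url": "http://127.0.0.1:12345/",
              "samp.profile.version": "1.3"}
    path = os.path.join(tempfile.mkdtemp(), ".samp-demo")
    write_lockfile(path, tokens)
    assert read_lockfile(path) == tokens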
def create_lock_file(lockfilename=None, mode=None, hub_id=None,
hub_params=None):
# Remove lock-files of dead hubs
remove_garbage_lock_files()
lockfiledir = ""
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the standard profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:"):]
lockfile_parsed = urlparse(lockfilename)
if lockfile_parsed[0] != 'file':
warnings.warn("Unable to start a Hub with lockfile {}. "
"Start-up process aborted.".format(lockfilename),
SAMPWarning)
return False
else:
lockfilename = lockfile_parsed[2]
else:
# If it is a fresh Hub instance
if lockfilename is None:
log.debug("Running mode: " + mode)
if mode == 'single':
lockfilename = os.path.join(_find_home(), ".samp")
else:
lockfiledir = os.path.join(_find_home(), ".samp-1")
# If missing create .samp-1 directory
try:
os.mkdir(lockfiledir)
except OSError:
pass # directory already exists
finally:
os.chmod(lockfiledir,
stat.S_IREAD + stat.S_IWRITE + stat.S_IEXEC)
lockfilename = os.path.join(lockfiledir,
f"samp-hub-{hub_id}")
else:
log.debug("Running mode: multiple")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
warnings.warn("Another SAMP Hub is already running. Start-up process "
"aborted.", SAMPWarning)
return False
log.debug("Lock-file: " + lockfilename)
write_lockfile(lockfilename, hub_params)
return lockfilename
def get_main_running_hub():
"""
Get either the hub given by the environment variable SAMP_HUB, or the one
given by the lockfile .samp in the user home directory.
"""
hubs = get_running_hubs()
if not hubs:
raise SAMPHubError("Unable to find a running SAMP Hub.")
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the standard profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:"):]
else:
raise SAMPHubError("SAMP Hub profile not supported.")
else:
lockfilename = os.path.join(_find_home(), ".samp")
return hubs[lockfilename]
def get_running_hubs():
"""
Return a dictionary containing the lock-file contents of all the currently
running hubs (single and/or multiple mode).
The dictionary format is:
``{<lock-file>: {<token-name>: <token-string>, ...}, ...}``
where ``{<lock-file>}`` is the lock-file name, ``{<token-name>}`` and
``{<token-string>}`` are the lock-file tokens (name and content).
Returns
-------
running_hubs : dict
Lock-file contents of all the currently running hubs.
"""
hubs = {}
lockfilename = ""
# HUB SINGLE INSTANCE MODE
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the standard profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:"):]
else:
lockfilename = os.path.join(_find_home(), ".samp")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
hubs[lockfilename] = lockfiledict
# HUB MULTIPLE INSTANCE MODE
    lockfiledir = os.path.join(_find_home(), ".samp-1")
if os.path.isdir(lockfiledir):
for filename in os.listdir(lockfiledir):
if filename.startswith('samp-hub'):
lockfilename = os.path.join(lockfiledir, filename)
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
hubs[lockfilename] = lockfiledict
return hubs
def check_running_hub(lockfilename):
"""
Test whether a hub identified by ``lockfilename`` is running or not.
Parameters
----------
lockfilename : str
Lock-file name (path + file name) of the Hub to be tested.
Returns
-------
is_running : bool
Whether the hub is running
hub_params : dict
If the hub is running this contains the parameters from the lockfile
"""
is_running = False
lockfiledict = {}
# Check whether a lockfile already exists
try:
lockfiledict = read_lockfile(lockfilename)
except OSError:
return is_running, lockfiledict
if "samp.hub.xmlrpc.url" in lockfiledict:
try:
proxy = xmlrpc.ServerProxy(lockfiledict["samp.hub.xmlrpc.url"]
.replace("\\", ""), allow_none=1)
proxy.samp.hub.ping()
is_running = True
except xmlrpc.ProtocolError:
# There is a protocol error (e.g. for authentication required),
# but the server is alive
is_running = True
except OSError:
pass
return is_running, lockfiledict
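# --- Illustrative sketch (not part of astropy.samp) --------------------------
# Typical use of check_running_hub: probe the default single-instance lockfile
# and report whether a hub answers samp.hub.ping. The function name is
# hypothetical.
def _demo_probe_default_hub():
    lockfilename = os.path.join(_find_home(), ".samp")
    is_running, params = check_running_hub(lockfilename)
    if is_running:
        log.info("Hub alive at " + params.get("samp.hub.xmlrpc.url", "?"))
    else:
        log.info("No running hub found for " + lockfilename)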
def remove_garbage_lock_files():
    # HUB SINGLE INSTANCE MODE
    lockfilename = os.path.join(_find_home(), ".samp")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if not hub_is_running:
# If lockfilename belongs to a dead hub, then it is deleted
if os.path.isfile(lockfilename):
with suppress(OSError):
os.remove(lockfilename)
# HUB MULTIPLE INSTANCE MODE
lockfiledir = os.path.join(_find_home(), ".samp-1")
if os.path.isdir(lockfiledir):
for filename in os.listdir(lockfiledir):
if filename.startswith('samp-hub'):
lockfilename = os.path.join(lockfiledir, filename)
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if not hub_is_running:
# If lockfilename belongs to a dead hub, then it is deleted
if os.path.isfile(lockfilename):
with suppress(OSError):
os.remove(lockfilename)
|
34571a3e95074cf768729ab702ffb4c6b31a13f34628bf76bb358742a417483e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import select
import socket
import threading
import warnings
from urllib.parse import urlunparse
from .constants import SAMP_STATUS_OK, SAMP_STATUS_WARNING
from .hub import SAMPHubServer
from .errors import SAMPClientError, SAMPWarning
from .utils import internet_on, get_num_args
from .standard_profile import ThreadingXMLRPCServer
__all__ = ['SAMPClient']
class SAMPClient:
"""
Utility class which provides facilities to create and manage a SAMP
compliant XML-RPC server that acts as SAMP callable client application.
Parameters
----------
hub : :class:`~astropy.samp.SAMPHubProxy`
An instance of :class:`~astropy.samp.SAMPHubProxy` to be
used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
# TODO: define what is meant by callable
def __init__(self, hub, name=None, description=None, metadata=None,
addr=None, port=0, callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._addr = addr
self._port = port
self._xmlrpcAddr = None
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}]}
self._response_bindings = {}
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name, self._port or 0)
except OSError:
self._host_name = "127.0.0.1"
self.hub = hub
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
self.client = ThreadingXMLRPCServer((self._addr or self._host_name,
self._port), logRequests=False, allow_none=True)
self.client.register_introspection_functions()
self.client.register_function(self.receive_notification, 'samp.client.receiveNotification')
self.client.register_function(self.receive_call, 'samp.client.receiveCall')
self.client.register_function(self.receive_response, 'samp.client.receiveResponse')
# If the port was set to zero, then the operating system has
# selected a free port. We now check what this port number is.
if self._port == 0:
self._port = self.client.socket.getsockname()[1]
protocol = 'http'
self._xmlrpcAddr = urlunparse((protocol,
'{}:{}'.format(self._addr or self._host_name,
self._port),
'', '', '', ''))
def start(self):
"""
Start the client in a separate thread (non-blocking).
This only has an effect if ``callable`` was set to `True` when
initializing the client.
"""
if self._callable:
self._is_running = True
self._run_client()
def stop(self, timeout=10.):
"""
Stop the client.
Parameters
----------
timeout : float
Timeout after which to give up if the client cannot be cleanly
shut down.
"""
# Setting _is_running to False causes the loop in _serve_forever to
# exit. The thread should then stop running. We wait for the thread to
# terminate until the timeout, then we continue anyway.
self._is_running = False
if self._callable and self._thread.is_alive():
self._thread.join(timeout)
if self._thread.is_alive():
raise SAMPClientError("Client was not shut down successfully "
"(timeout={}s)".format(timeout))
@property
def is_running(self):
"""
Whether the client is currently running.
"""
return self._is_running
@property
def is_registered(self):
"""
Whether the client is currently registered.
"""
return self._is_registered
def _run_client(self):
if self._callable:
self._thread.start()
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self.client.socket], [], [], 0.1)[0]
except OSError as exc:
warnings.warn(f"Call to select in SAMPClient failed: {exc}",
SAMPWarning)
else:
if read_ready:
self.client.handle_request()
self.client.server_close()
def _ping(self, private_key, sender_id, msg_id, msg_mtype, msg_params,
message):
reply = {"samp.status": SAMP_STATUS_OK, "samp.result": {}}
self.hub.reply(private_key, msg_id, reply)
def _client_env_get(self, private_key, sender_id, msg_id, msg_mtype,
msg_params, message):
if msg_params["name"] in os.environ:
reply = {"samp.status": SAMP_STATUS_OK,
"samp.result": {"value": os.environ[msg_params["name"]]}}
else:
reply = {"samp.status": SAMP_STATUS_WARNING,
"samp.result": {"value": ""},
"samp.error": {"samp.errortxt":
"Environment variable not defined."}}
self.hub.reply(private_key, msg_id, reply)
def _handle_notification(self, private_key, sender_id, message):
if private_key == self.get_private_key() and "samp.mtype" in message:
msg_mtype = message["samp.mtype"]
del message["samp.mtype"]
msg_params = message["samp.params"]
del message["samp.params"]
msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype)
for mtype in msubs:
if mtype in self._notification_bindings:
bound_func = self._notification_bindings[mtype][0]
if get_num_args(bound_func) == 5:
bound_func(private_key, sender_id, msg_mtype,
msg_params, message)
else:
bound_func(private_key, sender_id, None, msg_mtype,
msg_params, message)
return ""
def receive_notification(self, private_key, sender_id, message):
"""
Standard callable client ``receive_notification`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_notification`
method is used to bind distinct operations to MTypes. In case of a
customized callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
sender_id : str
Sender public ID.
message : dict
Received message.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_notification(private_key, sender_id, message)
def _handle_call(self, private_key, sender_id, msg_id, message):
if private_key == self.get_private_key() and "samp.mtype" in message:
msg_mtype = message["samp.mtype"]
del message["samp.mtype"]
msg_params = message["samp.params"]
del message["samp.params"]
msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype)
for mtype in msubs:
if mtype in self._call_bindings:
self._call_bindings[mtype][0](private_key, sender_id,
msg_id, msg_mtype,
msg_params, message)
return ""
def receive_call(self, private_key, sender_id, msg_id, message):
"""
Standard callable client ``receive_call`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_call` method is
used to bind distinct operations to MTypes. In case of a customized
callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
sender_id : str
Sender public ID.
msg_id : str
Message ID received.
message : dict
Received message.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_call(private_key, sender_id, msg_id, message)
def _handle_response(self, private_key, responder_id, msg_tag, response):
if (private_key == self.get_private_key() and
msg_tag in self._response_bindings):
self._response_bindings[msg_tag](private_key, responder_id,
msg_tag, response)
return ""
def receive_response(self, private_key, responder_id, msg_tag, response):
"""
Standard callable client ``receive_response`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_response` method
is used to bind distinct operations to MTypes. In case of a customized
callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
responder_id : str
Responder public ID.
msg_tag : str
Response message tag.
response : dict
Received response.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_response(private_key, responder_id, msg_tag,
response)
def bind_receive_message(self, mtype, function, declare=True,
metadata=None):
"""
Bind a specific MType to a function or class method, being intended for
a call or a notification.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, msg_id,
mtype, params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``msg_id`` is the Hub message-id (calls only,
otherwise is `None`), ``mtype`` is the message MType, ``params`` is the
message parameter set (content of ``"samp.params"``) and ``extra`` is a
dictionary containing any extra message map entry. The client is
automatically declared subscribed to the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
self.bind_receive_call(mtype, function, declare=declare,
metadata=metadata)
self.bind_receive_notification(mtype, function, declare=declare,
metadata=metadata)
def bind_receive_notification(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType notification to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, mtype,
params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``mtype`` is the message MType, ``params`` is
the notified message parameter set (content of ``"samp.params"``) and
``extra`` is a dictionary containing any extra message map entry. The
client is automatically declared subscribed to the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
if not metadata:
metadata = {}
self._notification_bindings[mtype] = [function, metadata]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def bind_receive_call(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType call to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, msg_id,
mtype, params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``msg_id`` is the Hub message-id, ``mtype`` is
the message MType, ``params`` is the message parameter set (content of
``"samp.params"``) and ``extra`` is a dictionary containing any extra
message map entry. The client is automatically declared subscribed to
the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
if not metadata:
metadata = {}
self._call_bindings[mtype] = [function, metadata]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def bind_receive_response(self, msg_tag, function):
"""
Bind a specific msg-tag response to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, responder_id,
msg_tag, response)
where ``private_key`` is the client private-key, ``responder_id`` is
the message responder ID, ``msg_tag`` is the message-tag provided at
call time and ``response`` is the response received.
Parameters
----------
msg_tag : str
Message-tag to be caught.
function : callable
Application function to be used when ``msg_tag`` is received.
"""
if self._callable:
self._response_bindings[msg_tag] = function
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_notification(self, mtype, declare=True):
"""
Remove from the notifications binding table the specified MType and
unsubscribe the client from it (if required).
Parameters
----------
mtype : str
MType to be removed.
declare : bool
Specify whether the client must be automatically declared as
unsubscribed from the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
del self._notification_bindings[mtype]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_call(self, mtype, declare=True):
"""
Remove from the calls binding table the specified MType and unsubscribe
the client from it (if required).
Parameters
----------
mtype : str
MType to be removed.
declare : bool
Specify whether the client must be automatically declared as
unsubscribed from the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
del self._call_bindings[mtype]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_response(self, msg_tag):
"""
Remove from the responses binding table the specified message-tag.
Parameters
----------
msg_tag : str
Message-tag to be removed.
"""
if self._callable:
del self._response_bindings[msg_tag]
else:
raise SAMPClientError("Client not callable.")
def declare_subscriptions(self, subscriptions=None):
"""
Declares the MTypes the client wishes to subscribe to, implicitly
defined with the MType binding methods
:meth:`~astropy.samp.client.SAMPClient.bind_receive_notification`
and :meth:`~astropy.samp.client.SAMPClient.bind_receive_call`.
An optional ``subscriptions`` map can be added to the final map passed
to the :meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions`
method.
Parameters
----------
subscriptions : dict, optional
Dictionary containing the list of MTypes to subscribe to, with the
same format of the ``subscriptions`` map passed to the
:meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions`
method.
"""
if self._callable:
self._declare_subscriptions(subscriptions)
else:
raise SAMPClientError("Client not callable.")
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register(self.hub.lockfile["samp.secret"])
if result["samp.self-id"] == "":
raise SAMPClientError("Registration failed - "
"samp.self-id was not set by the hub.")
if result["samp.private-key"] == "":
raise SAMPClientError("Registration failed - "
"samp.private-key was not set by the hub.")
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._set_xmlrpc_callback()
self._declare_subscriptions()
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
else:
raise SAMPClientError("Unable to register to the SAMP Hub. "
"Hub proxy not connected.")
def unregister(self):
"""
Unregister the client from the SAMP Hub.
"""
if self.hub.is_connected:
self._is_registered = False
self.hub.unregister(self._private_key)
self._hub_id = None
self._public_id = None
self._private_key = None
else:
raise SAMPClientError("Unable to unregister from the SAMP Hub. "
"Hub proxy not connected.")
def _set_xmlrpc_callback(self):
if self.hub.is_connected and self._private_key is not None:
self.hub.set_xmlrpc_callback(self._private_key,
self._xmlrpcAddr)
def _declare_subscriptions(self, subscriptions=None):
if self.hub.is_connected and self._private_key is not None:
mtypes_dict = {}
# Collect notification mtypes and metadata
for mtype in self._notification_bindings.keys():
mtypes_dict[mtype] = copy.deepcopy(self._notification_bindings[mtype][1])
            # Collect call mtypes and metadata
for mtype in self._call_bindings.keys():
mtypes_dict[mtype] = copy.deepcopy(self._call_bindings[mtype][1])
# Add optional subscription map
if subscriptions:
mtypes_dict.update(copy.deepcopy(subscriptions))
self.hub.declare_subscriptions(self._private_key, mtypes_dict)
else:
raise SAMPClientError("Unable to declare subscriptions. Hub "
"unreachable or not connected or client "
"not registered.")
def declare_metadata(self, metadata=None):
"""
Declare the client application metadata supported.
Parameters
----------
metadata : dict, optional
Dictionary containing the client application metadata as defined in
the SAMP definition document. If omitted, then no metadata are
declared.
"""
if self.hub.is_connected and self._private_key is not None:
if metadata is not None:
self._metadata.update(metadata)
self.hub.declare_metadata(self._private_key, self._metadata)
else:
raise SAMPClientError("Unable to declare metadata. Hub "
"unreachable or not connected or client "
"not registered.")
def get_private_key(self):
"""
Return the client private key used for the Standard Profile
communications obtained at registration time (``samp.private-key``).
Returns
-------
key : str
Client private key.
"""
return self._private_key
def get_public_id(self):
"""
Return public client ID obtained at registration time
(``samp.self-id``).
Returns
-------
id : str
Client public ID.
"""
return self._public_id
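# --- Illustrative sketch (not part of astropy.samp) --------------------------
# Typical life cycle of a callable client: create it around an already
# connected SAMPHubProxy (assumed), start and register it, then bind a call
# handler. The function and handler names are hypothetical.
def _demo_ping_listener(hub_proxy):
    client = SAMPClient(hub_proxy, name="demo", callable=True)
    client.start()
    client.register()
    def on_ping(private_key, sender_id, msg_id, mtype, params, extra):
        # Call handlers receive the hub message-id so that they can reply.
        client.hub.reply(private_key, msg_id,
                         {"samp.status": SAMP_STATUS_OK, "samp.result": {}})
    client.bind_receive_call("samp.app.ping", on_ping)
    return client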
|
3dc7eabd7bbc3c70d2a985795abc8edef7c1f3b3cc72fb62589f6e0c1ef9fdf7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sundry function and class decorators."""
import functools
import inspect
import textwrap
import threading
import types
import warnings
from inspect import signature
from .exceptions import (AstropyDeprecationWarning, AstropyUserWarning,
AstropyPendingDeprecationWarning)
__all__ = ['classproperty', 'deprecated', 'deprecated_attribute',
'deprecated_renamed_argument', 'format_doc',
'lazyproperty', 'sharedmethod']
_NotFound = object()
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type=None, warning_type=AstropyDeprecationWarning):
"""
Used to mark a function or class as deprecated.
To mark an attribute as deprecated, use `deprecated_attribute`.
Parameters
----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``func`` may be used for the name of the function,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. ``obj_type`` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function or class; if not provided
the name is automatically determined from the passed in
function or class, though this is useful in the case of
renamed functions, where the new function is just assigned to
the name of the deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object. The deprecation warning will
tell the user about this alternative if provided.
pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of a
``warning_type``.
obj_type : str, optional
The type of this object, if the automatically determined one
needs to be overridden.
warning_type : Warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
"""
method_types = (classmethod, staticmethod, types.MethodType)
def deprecate_doc(old_doc, message):
"""
Returns a given docstring with a deprecation message prepended
to it.
"""
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
new_doc = (('\n.. deprecated:: {since}'
'\n {message}\n\n'.format(
**{'since': since, 'message': message.strip()})) + old_doc)
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return new_doc
def get_function(func):
"""
Given a function or classmethod (or other function wrapper type), get
the function object.
"""
if isinstance(func, method_types):
func = func.__func__
return func
def deprecate_function(func, message, warning_type=warning_type):
"""
Returns a wrapped function that displays ``warning_type``
when it is called.
"""
if isinstance(func, method_types):
func_wrapper = type(func)
else:
func_wrapper = lambda f: f # noqa: E731
func = get_function(func)
def deprecated_func(*args, **kwargs):
if pending:
category = AstropyPendingDeprecationWarning
else:
category = warning_type
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
# If this is an extension function, we can't call
# functools.wraps on it, but we normally don't care.
# This crazy way to get the type of a wrapper descriptor is
# straight out of the Python 3.3 inspect module docs.
if type(func) is not type(str.__dict__['__add__']): # noqa: E721
deprecated_func = functools.wraps(func)(deprecated_func)
deprecated_func.__doc__ = deprecate_doc(
deprecated_func.__doc__, message)
return func_wrapper(deprecated_func)
def deprecate_class(cls, message, warning_type=warning_type):
"""
Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
if the class or any of the bases overrides ``__new__``) so it will give
a deprecation warning when an instance is created.
This won't work for extension classes because these can't be modified
in-place and the alternatives don't work in the general case:
- Using a new class that looks and behaves like the original doesn't
work because the __new__ method of extension types usually makes sure
that it's the same class or a subclass.
        - Subclassing the class and returning the subclass can lead to problems
with pickle and will look weird in the Sphinx docs.
"""
cls.__doc__ = deprecate_doc(cls.__doc__, message)
if cls.__new__ is object.__new__:
cls.__init__ = deprecate_function(get_function(cls.__init__),
message, warning_type)
else:
cls.__new__ = deprecate_function(get_function(cls.__new__),
message, warning_type)
return cls
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending, warning_type=warning_type):
if obj_type is None:
if isinstance(obj, type):
obj_type_name = 'class'
elif inspect.isfunction(obj):
obj_type_name = 'function'
elif inspect.ismethod(obj) or isinstance(obj, method_types):
obj_type_name = 'method'
else:
obj_type_name = 'object'
else:
obj_type_name = obj_type
if not name:
name = get_function(obj).__name__
altmessage = ''
if not message or type(message) is type(deprecate):
if pending:
message = ('The {func} {obj_type} will be deprecated in a '
'future version.')
else:
message = ('The {func} {obj_type} is deprecated and may '
'be removed in a future version.')
if alternative:
altmessage = f'\n Use {alternative} instead.'
message = ((message.format(**{
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type_name})) +
altmessage)
if isinstance(obj, type):
return deprecate_class(obj, message, warning_type)
else:
return deprecate_function(obj, message, warning_type)
if type(message) is type(deprecate):
return deprecate(message)
return deprecate
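# --- Illustrative sketch (not part of astropy.utils) -------------------------
# Minimal use of the decorator above; the names are hypothetical. Calling the
# wrapped function emits an AstropyDeprecationWarning that points at the
# alternative.
def _demo_deprecated_usage():
    @deprecated('5.0', alternative='square', obj_type='function')
    def old_square(x):
        return x * x
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert old_square(3) == 9
        assert issubclass(caught[-1].category, AstropyDeprecationWarning)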
def deprecated_attribute(name, since, message=None, alternative=None,
pending=False, warning_type=AstropyDeprecationWarning):
"""
Used to mark a public attribute as deprecated. This creates a
property that will warn when the given attribute name is accessed.
To prevent the warning (i.e. for internal code), use the private
name for the attribute by prepending an underscore
(i.e. ``self._name``).
Parameters
----------
name : str
The name of the deprecated attribute.
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``name`` may be used for the name of the attribute,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function.
alternative : str, optional
An alternative attribute that the user may use in place of the
deprecated attribute. The deprecation warning will tell the
user about this alternative if provided.
pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of
``warning_type``.
warning_type : Warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
Examples
--------
::
class MyClass:
# Mark the old_name as deprecated
old_name = misc.deprecated_attribute('old_name', '0.1')
def method(self):
self._old_name = 42
"""
private_name = '_' + name
specific_deprecated = deprecated(since, name=name, obj_type='attribute',
message=message, alternative=alternative,
pending=pending,
warning_type=warning_type)
@specific_deprecated
def get(self):
return getattr(self, private_name)
@specific_deprecated
def set(self, val):
setattr(self, private_name, val)
@specific_deprecated
def delete(self):
delattr(self, private_name)
return property(get, set, delete)
def deprecated_renamed_argument(old_name, new_name, since,
arg_in_kwargs=False, relax=False,
pending=False,
warning_type=AstropyDeprecationWarning,
alternative='', message=''):
"""Deprecate a _renamed_ or _removed_ function argument.
The decorator assumes that the argument with the ``old_name`` was removed
from the function signature and the ``new_name`` replaced it at the
**same position** in the signature. If the ``old_name`` argument is
given when calling the decorated function the decorator will catch it and
issue a deprecation warning and pass it on as ``new_name`` argument.
Parameters
----------
old_name : str or sequence of str
The old name of the argument.
new_name : str or sequence of str or None
The new name of the argument. Set this to `None` to remove the
argument ``old_name`` instead of renaming it.
since : str or number or sequence of str or number
The release at which the old argument became deprecated.
arg_in_kwargs : bool or sequence of bool, optional
If the argument is not a named argument (for example it
was meant to be consumed by ``**kwargs``) set this to
``True``. Otherwise the decorator will throw an Exception
if the ``new_name`` cannot be found in the signature of
the decorated function.
Default is ``False``.
relax : bool or sequence of bool, optional
If ``False`` a ``TypeError`` is raised if both ``new_name`` and
``old_name`` are given. If ``True`` the value for ``new_name`` is used
and a Warning is issued.
Default is ``False``.
pending : bool or sequence of bool, optional
If ``True`` this will hide the deprecation warning and ignore the
corresponding ``relax`` parameter value.
Default is ``False``.
warning_type : Warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object if ``new_name`` is None. The deprecation
warning will tell the user about this alternative if provided.
message : str, optional
A custom warning message. If provided then ``since`` and
``alternative`` options will have no effect.
Raises
------
TypeError
If the new argument name cannot be found in the function
signature and arg_in_kwargs was False or if it is used to
deprecate the name of the ``*args``-, ``**kwargs``-like arguments.
At runtime such an Error is raised if both the new_name
and old_name were specified when calling the function and
"relax=False".
Notes
-----
    The decorator should be applied to a function where the **name**
    of an argument was changed but the function still applies the same
    logic to it.
.. warning::
If ``old_name`` is a list or tuple the ``new_name`` and ``since`` must
also be a list or tuple with the same number of entries. ``relax`` and
``arg_in_kwarg`` can be a single bool (applied to all) or also a
list/tuple with the same number of entries like ``new_name``, etc.
Examples
--------
The deprecation warnings are not shown in the following examples.
To deprecate a positional or keyword argument::
>>> from astropy.utils.decorators import deprecated_renamed_argument
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0')
... def test(sigma):
... return sigma
>>> test(2)
2
>>> test(sigma=2)
2
>>> test(sig=2) # doctest: +SKIP
2
To deprecate an argument caught inside the ``**kwargs`` the
``arg_in_kwargs`` has to be set::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0',
... arg_in_kwargs=True)
... def test(**kwargs):
... return kwargs['sigma']
>>> test(sigma=2)
2
>>> test(sig=2) # doctest: +SKIP
2
By default providing the new and old keyword will lead to an Exception. If
a Warning is desired set the ``relax`` argument::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True)
... def test(sigma):
... return sigma
>>> test(sig=2) # doctest: +SKIP
2
It is also possible to replace multiple arguments. The ``old_name``,
``new_name`` and ``since`` have to be `tuple` or `list` and contain the
same number of entries::
>>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'],
... ['1.0', 1.2])
... def test(alpha, beta):
... return alpha, beta
>>> test(a=2, b=3) # doctest: +SKIP
(2, 3)
In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which
is applied to all renamed arguments) or must also be a `tuple` or `list`
with values for each of the arguments.
"""
cls_iter = (list, tuple)
if isinstance(old_name, cls_iter):
n = len(old_name)
# Assume that new_name and since are correct (tuple/list with the
# appropriate length) in the spirit of the "consenting adults". But the
# optional parameters may not be set, so if these are not iterables
# wrap them.
if not isinstance(arg_in_kwargs, cls_iter):
arg_in_kwargs = [arg_in_kwargs] * n
if not isinstance(relax, cls_iter):
relax = [relax] * n
if not isinstance(pending, cls_iter):
pending = [pending] * n
if not isinstance(message, cls_iter):
message = [message] * n
else:
# To allow a uniform approach later on, wrap all arguments in lists.
n = 1
old_name = [old_name]
new_name = [new_name]
since = [since]
arg_in_kwargs = [arg_in_kwargs]
relax = [relax]
pending = [pending]
message = [message]
def decorator(function):
# The named arguments of the function.
arguments = signature(function).parameters
keys = list(arguments.keys())
position = [None] * n
for i in range(n):
# Determine the position of the argument.
if arg_in_kwargs[i]:
pass
else:
if new_name[i] is None:
param = arguments[old_name[i]]
elif new_name[i] in arguments:
param = arguments[new_name[i]]
# In case the argument is not found in the list of arguments
# the only remaining possibility is that it should be caught
# by some kind of **kwargs argument.
# This case has to be explicitly specified, otherwise throw
# an exception!
else:
raise TypeError(
f'"{new_name[i]}" was not specified in the function '
'signature. If it was meant to be part of '
'"**kwargs" then set "arg_in_kwargs" to "True"')
# There are several possibilities now:
# 1.) Positional or keyword argument:
if param.kind == param.POSITIONAL_OR_KEYWORD:
if new_name[i] is None:
position[i] = keys.index(old_name[i])
else:
position[i] = keys.index(new_name[i])
# 2.) Keyword only argument:
elif param.kind == param.KEYWORD_ONLY:
# These cannot be specified by position.
position[i] = None
# 3.) positional-only argument, varargs, varkwargs or some
# unknown type:
else:
raise TypeError(f'cannot replace argument "{new_name[i]}" '
f'of kind {repr(param.kind)}.')
@functools.wraps(function)
def wrapper(*args, **kwargs):
for i in range(n):
msg = message[i] or (f'"{old_name[i]}" was deprecated in '
f'version {since[i]} and will be removed '
'in a future version. ')
# The only way to have oldkeyword inside the function is
# that it is passed as kwarg because the oldkeyword
# parameter was renamed to newkeyword.
if old_name[i] in kwargs:
value = kwargs.pop(old_name[i])
# Display the deprecation warning only when it's not
# pending.
if not pending[i]:
if not message[i]:
if new_name[i] is not None:
msg += f'Use argument "{new_name[i]}" instead.'
elif alternative:
msg += f'\n Use {alternative} instead.'
warnings.warn(msg, warning_type, stacklevel=2)
# Check if the newkeyword was given as well.
newarg_in_args = (position[i] is not None and
len(args) > position[i])
newarg_in_kwargs = new_name[i] in kwargs
if newarg_in_args or newarg_in_kwargs:
if not pending[i]:
# If both are given print a Warning if relax is
                            # True or raise an Exception if relax is False.
if relax[i]:
warnings.warn(
f'"{old_name[i]}" and "{new_name[i]}" '
'keywords were set. '
f'Using the value of "{new_name[i]}".',
AstropyUserWarning)
else:
raise TypeError(
f'cannot specify both "{old_name[i]}" and '
f'"{new_name[i]}".')
else:
# Pass the value of the old argument with the
# name of the new argument to the function
if new_name[i] is not None:
kwargs[new_name[i]] = value
# If old argument has no replacement, cast it back.
# https://github.com/astropy/astropy/issues/9914
else:
kwargs[old_name[i]] = value
# Deprecated keyword without replacement is given as
# positional argument.
elif (not pending[i] and not new_name[i] and position[i] and
len(args) > position[i]):
if alternative and not message[i]:
msg += f'\n Use {alternative} instead.'
warnings.warn(msg, warning_type, stacklevel=2)
return function(*args, **kwargs)
return wrapper
return decorator
# TODO: This can still be made to work for setters by implementing an
# accompanying metaclass that supports it; we just don't need that right this
# second
class classproperty(property):
"""
Similar to `property`, but allows class-level properties. That is,
a property whose getter is like a `classmethod`.
The wrapped method may explicitly use the `classmethod` decorator (which
    must come before this decorator), or the `classmethod` may be omitted
(it is implicit through use of this decorator).
.. note::
classproperty only works for *read-only* properties. It does not
currently allow writeable/deletable properties, due to subtleties of how
Python descriptors work. In order to implement such properties on a class
a metaclass for that class must be implemented.
Parameters
----------
fget : callable
The function that computes the value of this property (in particular,
the function when this is used as a decorator) a la `property`.
doc : str, optional
The docstring for the property--by default inherited from the getter
function.
lazy : bool, optional
If True, caches the value returned by the first call to the getter
function, so that it is only called once (used for lazy evaluation
of an attribute). This is analogous to `lazyproperty`. The ``lazy``
argument can also be used when `classproperty` is used as a decorator
(see the third example below). When used in the decorator syntax this
*must* be passed in as a keyword argument.
Examples
--------
::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal + 1
...
>>> Foo.bar
2
>>> foo_instance = Foo()
>>> foo_instance.bar
2
>>> foo_instance._bar_internal = 2
>>> foo_instance.bar # Ignores instance attributes
2
As previously noted, a `classproperty` is limited to implementing
read-only attributes::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal
... @bar.setter
... def bar(cls, value):
... cls._bar_internal = value
...
Traceback (most recent call last):
...
NotImplementedError: classproperty can only be read-only; use a
metaclass to implement modifiable class-level properties
When the ``lazy`` option is used, the getter is only called once::
>>> class Foo:
... @classproperty(lazy=True)
... def bar(cls):
... print("Performing complicated calculation")
... return 1
...
>>> Foo.bar
Performing complicated calculation
1
>>> Foo.bar
1
If a subclass inherits a lazy `classproperty` the property is still
re-evaluated for the subclass::
>>> class FooSub(Foo):
... pass
...
>>> FooSub.bar
Performing complicated calculation
1
>>> FooSub.bar
1
"""
def __new__(cls, fget=None, doc=None, lazy=False):
if fget is None:
# Being used as a decorator--return a wrapper that implements
# decorator syntax
def wrapper(func):
return cls(func, lazy=lazy)
return wrapper
return super().__new__(cls)
def __init__(self, fget, doc=None, lazy=False):
self._lazy = lazy
if lazy:
self._lock = threading.RLock() # Protects _cache
self._cache = {}
fget = self._wrap_fget(fget)
super().__init__(fget=fget, doc=doc)
# There is a buglet in Python where self.__doc__ doesn't
# get set properly on instances of property subclasses if
# the doc argument was used rather than taking the docstring
# from fget
# Related Python issue: https://bugs.python.org/issue24766
if doc is not None:
self.__doc__ = doc
def __get__(self, obj, objtype):
if self._lazy:
val = self._cache.get(objtype, _NotFound)
if val is _NotFound:
with self._lock:
# Check if another thread initialised before we locked.
val = self._cache.get(objtype, _NotFound)
if val is _NotFound:
val = self.fget.__wrapped__(objtype)
self._cache[objtype] = val
else:
# The base property.__get__ will just return self here;
# instead we pass objtype through to the original wrapped
# function (which takes the class as its sole argument)
val = self.fget.__wrapped__(objtype)
return val
def getter(self, fget):
return super().getter(self._wrap_fget(fget))
def setter(self, fset):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties")
def deleter(self, fdel):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties")
@staticmethod
def _wrap_fget(orig_fget):
if isinstance(orig_fget, classmethod):
orig_fget = orig_fget.__func__
# Using stock functools.wraps instead of the fancier version
# found later in this module, which is overkill for this purpose
@functools.wraps(orig_fget)
def fget(obj):
return orig_fget(obj.__class__)
return fget
# Adapted from the recipe at
# http://code.activestate.com/recipes/363602-lazy-property-evaluation
class lazyproperty(property):
"""
Works similarly to property(), but computes the value only once.
    This essentially memoizes the value of the property by storing the result
of its computation in the ``__dict__`` of the object instance. This is
useful for computing the value of some property that should otherwise be
invariant. For example::
>>> class LazyTest:
... @lazyproperty
... def complicated_property(self):
... print('Computing the value for complicated_property...')
... return 42
...
>>> lt = LazyTest()
>>> lt.complicated_property
Computing the value for complicated_property...
42
>>> lt.complicated_property
42
As the example shows, the second time ``complicated_property`` is accessed,
the ``print`` statement is not executed. Only the return value from the
    first access of ``complicated_property`` is returned.
By default, a setter and deleter are used which simply overwrite and
delete, respectively, the value stored in ``__dict__``. Any user-specified
setter or deleter is executed before executing these default actions.
The one exception is that the default setter is not run if the user setter
already sets the new value in ``__dict__`` and returns that value and the
returned value is not ``None``.
"""
def __init__(self, fget, fset=None, fdel=None, doc=None):
super().__init__(fget, fset, fdel, doc)
self._key = self.fget.__name__
self._lock = threading.RLock()
def __get__(self, obj, owner=None):
try:
obj_dict = obj.__dict__
val = obj_dict.get(self._key, _NotFound)
if val is _NotFound:
with self._lock:
# Check if another thread beat us to it.
val = obj_dict.get(self._key, _NotFound)
if val is _NotFound:
val = self.fget(obj)
obj_dict[self._key] = val
return val
except AttributeError:
if obj is None:
return self
raise
def __set__(self, obj, val):
obj_dict = obj.__dict__
if self.fset:
ret = self.fset(obj, val)
if ret is not None and obj_dict.get(self._key) is ret:
# By returning the value set the setter signals that it
# took over setting the value in obj.__dict__; this
# mechanism allows it to override the input value
return
obj_dict[self._key] = val
def __delete__(self, obj):
if self.fdel:
self.fdel(obj)
obj.__dict__.pop(self._key, None) # Delete if present
class sharedmethod(classmethod):
"""
This is a method decorator that allows both an instancemethod and a
`classmethod` to share the same name.
When using `sharedmethod` on a method defined in a class's body, it
may be called on an instance, or on a class. In the former case it
behaves like a normal instance method (a reference to the instance is
automatically passed as the first ``self`` argument of the method)::
>>> class Example:
... @sharedmethod
... def identify(self, *args):
... print('self was', self)
... print('additional args were', args)
...
>>> ex = Example()
>>> ex.identify(1, 2)
self was <astropy.utils.decorators.Example object at 0x...>
additional args were (1, 2)
In the latter case, when the `sharedmethod` is called directly from a
class, it behaves like a `classmethod`::
>>> Example.identify(3, 4)
self was <class 'astropy.utils.decorators.Example'>
additional args were (3, 4)
This also supports a more advanced usage, where the `classmethod`
implementation can be written separately. If the class's *metaclass*
has a method of the same name as the `sharedmethod`, the version on
the metaclass is delegated to::
>>> class ExampleMeta(type):
... def identify(self):
... print('this implements the {0}.identify '
... 'classmethod'.format(self.__name__))
...
>>> class Example(metaclass=ExampleMeta):
... @sharedmethod
... def identify(self):
... print('this implements the instancemethod')
...
>>> Example().identify()
this implements the instancemethod
>>> Example.identify()
this implements the Example.identify classmethod
"""
def __get__(self, obj, objtype=None):
if obj is None:
mcls = type(objtype)
clsmeth = getattr(mcls, self.__func__.__name__, None)
if callable(clsmeth):
func = clsmeth
else:
func = self.__func__
return self._make_method(func, objtype)
else:
return self._make_method(self.__func__, obj)
@staticmethod
def _make_method(func, instance):
return types.MethodType(func, instance)
def format_doc(docstring, *args, **kwargs):
"""
Replaces the docstring of the decorated object and then formats it.
The formatting works like :meth:`str.format` and if the decorated object
already has a docstring this docstring can be included in the new
documentation if you use the ``{__doc__}`` placeholder.
Its primary use is for reusing a *long* docstring in multiple functions
when it is the same or only slightly different between them.
Parameters
----------
docstring : str or object or None
The docstring that will replace the docstring of the decorated
object. If it is an object like a function or class it will
take the docstring of this object. If it is a string it will use the
string itself. One special case is if the string is ``None`` then
it will use the decorated function's docstring and format it.
args :
passed to :meth:`str.format`.
kwargs :
passed to :meth:`str.format`. If the function has a (not empty)
docstring the original docstring is added to the kwargs with the
keyword ``'__doc__'``.
Raises
------
ValueError
If the ``docstring`` (or interpreted docstring if it was ``None``
or not a string) is empty.
IndexError, KeyError
If a placeholder in the (interpreted) ``docstring`` was not filled. see
:meth:`str.format` for more information.
Notes
-----
Using this decorator allows, for example Sphinx, to parse the
correct docstring.
Examples
--------
Replacing the current docstring is very easy::
>>> from astropy.utils.decorators import format_doc
>>> @format_doc('''Perform num1 + num2''')
... def add(num1, num2):
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform num1 + num2
sometimes instead of replacing you only want to add to it::
>>> doc = '''
... {__doc__}
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... '''
>>> @format_doc(doc)
... def add(num1, num2):
... '''Perform addition.'''
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
in case one might want to format it further::
>>> doc = '''
... Perform {0}.
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... result of num1 {op} num2
... {__doc__}
... '''
>>> @format_doc(doc, 'addition', op='+')
... def add(num1, num2):
... return num1+num2
...
>>> @format_doc(doc, 'subtraction', op='-')
... def subtract(num1, num2):
... '''Notes: This one has additional notes.'''
... return num1-num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
>>> help(subtract) # doctest: +SKIP
Help on function subtract in module __main__:
<BLANKLINE>
subtract(num1, num2)
Perform subtraction.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 - num2
Notes : This one has additional notes.
These methods can be combined; the docstring can even be taken from another
object by passing that object as the ``docstring``. You just have to specify
the object::
>>> @format_doc(add)
... def another_add(num1, num2):
... return num1 + num2
...
>>> help(another_add) # doctest: +SKIP
Help on function another_add in module __main__:
<BLANKLINE>
another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
But be aware that this decorator *only* formats the given docstring, not
the strings passed as ``args`` or ``kwargs`` (not even the original
docstring)::
>>> @format_doc(doc, 'addition', op='+')
... def yet_another_add(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(yet_another_add) # doctest: +SKIP
Help on function yet_another_add in module __main__:
<BLANKLINE>
yet_another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
This one is good for {0}.
To work around it you could specify the docstring to be ``None``::
>>> @format_doc(None, 'addition')
... def last_add_i_swear(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(last_add_i_swear) # doctest: +SKIP
Help on function last_add_i_swear in module __main__:
<BLANKLINE>
last_add_i_swear(num1, num2)
This one is good for addition.
Using it with ``None`` as docstring allows using the decorator twice
on an object to first parse the new docstring and then to parse the
original docstring or the ``args`` and ``kwargs``.
"""
def set_docstring(obj):
if docstring is None:
# None means: use the object's __doc__
doc = obj.__doc__
# Delete documentation in this case so we don't end up with
# awkwardly self-inserted docs.
obj.__doc__ = None
elif isinstance(docstring, str):
# String: use the string that was given
doc = docstring
else:
# Something else: Use the __doc__ of this
doc = docstring.__doc__
if not doc:
# In case the docstring is empty it's probably not what was wanted.
raise ValueError('docstring must be a string or an object '
'with a docstring that is not empty.')
# If the original has a non-empty docstring, append it to the format
# kwargs.
kwargs['__doc__'] = obj.__doc__ or ''
obj.__doc__ = doc.format(*args, **kwargs)
return obj
return set_docstring
|
e1a05478c073ff232837c38de2801ebfa1695a657565bdd6f1f1400251ab726a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Utilities for generating new Python code at runtime."""
import inspect
import itertools
import keyword
import os
import re
import textwrap
from .introspection import find_current_module
__all__ = ['make_function_with_signature']
_ARGNAME_RE = re.compile(r'^[A-Za-z][A-Za-z_]*')
"""
Regular expression used by make_function_with_signature which limits the allowed argument
names for the created function. Only valid Python variable names in
the ASCII range and not beginning with '_' are allowed, currently.
"""
def make_function_with_signature(func, args=(), kwargs={}, varargs=None,
varkwargs=None, name=None):
"""
Make a new function from an existing function but with the desired
signature.
The desired signature must of course be compatible with the arguments
actually accepted by the input function.
The ``args`` are strings that should be the names of the positional
arguments. ``kwargs`` can map names of keyword arguments to their
default values. It may be either a ``dict`` or a list of ``(keyword,
default)`` tuples.
If ``varargs`` is a string it is added to the positional arguments as
``*<varargs>``. Likewise ``varkwargs`` can be the name for a variable
keyword argument placeholder like ``**<varkwargs>``.
If not specified the name of the new function is taken from the original
function. Otherwise, the ``name`` argument can be used to specify a new
name.
Note, the names may only be valid Python variable names.
"""
pos_args = []
key_args = []
if isinstance(kwargs, dict):
iter_kwargs = kwargs.items()
else:
iter_kwargs = iter(kwargs)
# Check that all the argument names are valid
for item in itertools.chain(args, iter_kwargs):
if isinstance(item, tuple):
argname = item[0]
key_args.append(item)
else:
argname = item
pos_args.append(item)
if keyword.iskeyword(argname) or not _ARGNAME_RE.match(argname):
raise SyntaxError(f'invalid argument name: {argname}')
for item in (varargs, varkwargs):
if item is not None:
if keyword.iskeyword(item) or not _ARGNAME_RE.match(item):
raise SyntaxError(f'invalid argument name: {item}')
def_signature = [', '.join(pos_args)]
if varargs:
def_signature.append(f', *{varargs}')
call_signature = def_signature[:]
if name is None:
name = func.__name__
global_vars = {f'__{name}__func': func}
local_vars = {}
# Make local variables to handle setting the default args
for idx, item in enumerate(key_args):
key, value = item
default_var = f'_kwargs{idx}'
local_vars[default_var] = value
def_signature.append(f', {key}={default_var}')
call_signature.append(', {0}={0}'.format(key))
if varkwargs:
def_signature.append(f', **{varkwargs}')
call_signature.append(f', **{varkwargs}')
def_signature = ''.join(def_signature).lstrip(', ')
call_signature = ''.join(call_signature).lstrip(', ')
mod = find_current_module(2)
frm = inspect.currentframe().f_back
if mod:
filename = mod.__file__
modname = mod.__name__
if filename.endswith('.pyc'):
filename = os.path.splitext(filename)[0] + '.py'
else:
filename = '<string>'
modname = '__main__'
# Subtract 2 from the line number since the length of the template itself
# is two lines. Therefore we have to subtract those off in order for the
# pointer in tracebacks from __{name}__func to point to the right spot.
lineno = frm.f_lineno - 2
# The lstrip is in case there were *no* positional arguments (a rare case
# in any context in which this will actually be used...)
template = textwrap.dedent("""{0}\
def {name}({sig1}):
return __{name}__func({sig2})
""".format('\n' * lineno, name=name, sig1=def_signature,
sig2=call_signature))
code = compile(template, filename, 'single')
eval(code, global_vars, local_vars)
new_func = local_vars[name]
new_func.__module__ = modname
new_func.__doc__ = func.__doc__
return new_func
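# Illustrative usage sketch (not part of the original module; ``_add`` and
# ``add`` are hypothetical names). The generated wrapper exposes an explicit
# two-argument signature while delegating to the original callable.
def _demo_make_function_with_signature():
    def _add(*args, **kwargs):
        return args[0] + args[1]

    add = make_function_with_signature(_add, args=('a', 'b'), name='add')
    assert add(1, 2) == 3
    assert str(inspect.signature(add)) == '(a, b)'
    return add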
|
094806b2a15b6cf5ef7180ca262d196a0a8390390771fd4d1e2851985099c0c3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for console input and output.
"""
import codecs
import locale
import re
import math
import multiprocessing
import os
import struct
import sys
import threading
import time
# concurrent.futures imports moved inside functions using them to avoid
# import failure when running in pyodide/Emscripten
try:
import fcntl
import termios
import signal
_CAN_RESIZE_TERMINAL = True
except ImportError:
_CAN_RESIZE_TERMINAL = False
from astropy import conf
from .misc import isiterable
from .decorators import classproperty
__all__ = [
'isatty', 'color_print', 'human_time', 'human_file_size',
'ProgressBar', 'Spinner', 'print_code_line', 'ProgressBarOrSpinner',
'terminal_size']
_DEFAULT_ENCODING = 'utf-8'
class _IPython:
"""Singleton class given access to IPython streams, etc."""
@classproperty
def get_ipython(cls):
try:
from IPython import get_ipython
except ImportError:
pass
return get_ipython
@classproperty
def OutStream(cls):
if not hasattr(cls, '_OutStream'):
cls._OutStream = None
try:
cls.get_ipython()
except NameError:
return None
try:
from ipykernel.iostream import OutStream
except ImportError:
try:
from IPython.zmq.iostream import OutStream
except ImportError:
from IPython import version_info
if version_info[0] >= 4:
return None
try:
from IPython.kernel.zmq.iostream import OutStream
except ImportError:
return None
cls._OutStream = OutStream
return cls._OutStream
@classproperty
def ipyio(cls):
if not hasattr(cls, '_ipyio'):
try:
from IPython.utils import io
except ImportError:
cls._ipyio = None
else:
cls._ipyio = io
return cls._ipyio
@classmethod
def get_stream(cls, stream):
return getattr(cls.ipyio, stream)
def _get_stdout(stderr=False):
"""
This utility function contains the logic to determine what streams to use
by default for standard out/err.
Typically this will just return `sys.stdout`, but it contains additional
logic for use in IPython on Windows to determine the correct stream to use
(usually ``IPython.utils.io.stdout`` but only if sys.stdout is a TTY).
"""
if stderr:
stream = 'stderr'
else:
stream = 'stdout'
sys_stream = getattr(sys, stream)
return sys_stream
def isatty(file):
"""
Returns `True` if ``file`` is a tty.
Most built-in Python file-like objects have an `isatty` member,
but some user-defined types may not, so this assumes those are not
ttys.
"""
if (multiprocessing.current_process().name != 'MainProcess' or
threading.current_thread().name != 'MainThread'):
return False
if hasattr(file, 'isatty'):
return file.isatty()
if _IPython.OutStream is None or (not isinstance(file, _IPython.OutStream)):
return False
# File is an IPython OutStream. Check whether:
# - File name is 'stdout'; or
# - File wraps a Console
if getattr(file, 'name', None) == 'stdout':
return True
if hasattr(file, 'stream'):
# FIXME: pyreadline has not had a new release since 2015; drop it when
# IPython minversion is 5.x.
# On Windows, in IPython 2 the standard I/O streams will wrap
# pyreadline.Console objects if pyreadline is available; this should
# be considered a TTY.
try:
from pyreadline.console import Console as PyreadlineConsole
except ImportError:
return False
return isinstance(file.stream, PyreadlineConsole)
return False
def terminal_size(file=None):
"""
Returns a tuple (height, width) containing the height and width of
the terminal.
This function will look for the width and height in multiple places
before falling back on the width and height in astropy's
configuration.
"""
if file is None:
file = _get_stdout()
try:
s = struct.pack("HHHH", 0, 0, 0, 0)
x = fcntl.ioctl(file, termios.TIOCGWINSZ, s)
(lines, width, xpixels, ypixels) = struct.unpack("HHHH", x)
if lines > 12:
lines -= 6
if width > 10:
width -= 1
if lines <= 0 or width <= 0:
raise Exception('unable to get terminal size')
return (lines, width)
except Exception:
try:
# see if POSIX standard variables will work
return (int(os.environ.get('LINES')),
int(os.environ.get('COLUMNS')))
except TypeError:
# fall back on configuration variables, or if not
# set, (25, 80)
lines = conf.max_lines
width = conf.max_width
if lines is None:
lines = 25
if width is None:
width = 80
return lines, width
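# Illustrative usage sketch (not part of the original module). The ioctl path
# is tried first; when it fails (e.g. no tty is attached), the LINES/COLUMNS
# environment variables and finally the astropy configuration values are used.
def _demo_terminal_size():
    lines, width = terminal_size()
    return '-' * min(width, 79)  # a horizontal rule no wider than the terminal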
def _color_text(text, color):
"""
Returns a string wrapped in ANSI color codes for coloring the
text in a terminal::
colored_text = color_text('Here is a message', 'blue')
This won't actually affect the text until it is printed to the
terminal.
Parameters
----------
text : str
The string to return, bounded by the color codes.
color : str
An ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
"""
color_mapping = {
'black': '0;30',
'red': '0;31',
'green': '0;32',
'brown': '0;33',
'blue': '0;34',
'magenta': '0;35',
'cyan': '0;36',
'lightgrey': '0;37',
'default': '0;39',
'darkgrey': '1;30',
'lightred': '1;31',
'lightgreen': '1;32',
'yellow': '1;33',
'lightblue': '1;34',
'lightmagenta': '1;35',
'lightcyan': '1;36',
'white': '1;37'}
if sys.platform == 'win32' and _IPython.OutStream is None:
# On Windows do not colorize text unless in IPython
return text
color_code = color_mapping.get(color, '0;39')
return f'\033[{color_code}m{text}\033[0m'
def _decode_preferred_encoding(s):
"""Decode the supplied byte string using the preferred encoding
for the locale (`locale.getpreferredencoding`) or, if the default encoding
is invalid, fall back first on utf-8, then on latin-1 if the message cannot
be decoded with utf-8.
"""
enc = locale.getpreferredencoding()
try:
try:
return s.decode(enc)
except LookupError:
enc = _DEFAULT_ENCODING
return s.decode(enc)
except UnicodeDecodeError:
return s.decode('latin-1')
def _write_with_fallback(s, write, fileobj):
"""Write the supplied string with the given write function like
``write(s)``, but use a writer for the locale's preferred encoding in case
of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or
'latin-1'.
"""
try:
write(s)
return write
except UnicodeEncodeError:
# Let's try the next approach...
pass
enc = locale.getpreferredencoding()
try:
Writer = codecs.getwriter(enc)
except LookupError:
Writer = codecs.getwriter(_DEFAULT_ENCODING)
f = Writer(fileobj)
write = f.write
try:
write(s)
return write
except UnicodeEncodeError:
Writer = codecs.getwriter('latin-1')
f = Writer(fileobj)
write = f.write
# If this doesn't work let the exception bubble up; I'm out of ideas
write(s)
return write
def color_print(*args, end='\n', **kwargs):
"""
Prints colors and styles to the terminal using ANSI escape
sequences.
::
color_print('This is the color ', 'default', 'GREEN', 'green')
Parameters
----------
positional args : str
The positional arguments come in pairs (*msg*, *color*), where
*msg* is the string to display and *color* is the color to
display it in.
*color* is an ANSI terminal color name. Must be one of:
black, red, green, brown, blue, magenta, cyan, lightgrey,
default, darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white, or '' (the empty string).
file : writable file-like, optional
Where to write to. Defaults to `sys.stdout`. If file is not
a tty (as determined by calling its `isatty` member, if one
exists), no coloring will be included.
end : str, optional
The ending of the message. Defaults to ``\\n``. The end will
be printed after resetting any color or font state.
"""
file = kwargs.get('file', _get_stdout())
write = file.write
if isatty(file) and conf.use_color:
for i in range(0, len(args), 2):
msg = args[i]
if i + 1 == len(args):
color = ''
else:
color = args[i + 1]
if color:
msg = _color_text(msg, color)
# Some file objects support writing unicode sensibly on some Python
# versions; if this fails try creating a writer using the locale's
# preferred encoding. If that fails too give up.
write = _write_with_fallback(msg, write, file)
write(end)
else:
for i in range(0, len(args), 2):
msg = args[i]
write(msg)
write(end)
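# Illustrative usage sketch (not part of the original module; ``buf`` is a
# hypothetical target). Message/color pairs alternate in the positional
# arguments; coloring is skipped automatically when the target is not a tty.
def _demo_color_print():
    import io
    buf = io.StringIO()
    color_print('status: ', 'default', 'OK', 'green', file=buf)
    assert buf.getvalue() == 'status: OK\n'   # plain text: StringIO is no tty
    return buf.getvalue()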
def strip_ansi_codes(s):
"""
Remove ANSI color codes from the string.
"""
return re.sub('\033\\[([0-9]+)(;[0-9]+)*m', '', s)
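# Illustrative usage sketch (not part of the original module): remove the
# escape sequences produced by ``_color_text`` to recover the plain message.
def _demo_strip_ansi_codes():
    colored = _color_text('done', 'green')
    assert strip_ansi_codes(colored) == 'done'
    return strip_ansi_codes(colored)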
def human_time(seconds):
"""
Returns a human-friendly time string that is always exactly 6
characters long.
Depending on the number of seconds given, can be one of::
1w 3d
2d 4h
1h 5m
1m 4s
15s
Will be in color if console coloring is turned on.
Parameters
----------
seconds : int
The number of seconds to represent
Returns
-------
time : str
A human-friendly representation of the given number of seconds
that is always exactly 6 characters.
"""
units = [
('y', 60 * 60 * 24 * 7 * 52),
('w', 60 * 60 * 24 * 7),
('d', 60 * 60 * 24),
('h', 60 * 60),
('m', 60),
('s', 1),
]
seconds = int(seconds)
if seconds < 60:
return f' {seconds:2d}s'
for i in range(len(units) - 1):
unit1, limit1 = units[i]
unit2, limit2 = units[i + 1]
if seconds >= limit1:
return '{:2d}{}{:2d}{}'.format(
seconds // limit1, unit1,
(seconds % limit1) // limit2, unit2)
return ' ~inf'
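# Illustrative usage sketch (not part of the original module): the two most
# significant units are shown, padded to a fixed width.
def _demo_human_time():
    assert human_time(3661) == ' 1h 1m'   # 1 hour, 1 minute
    return human_time(90)                 # ' 1m30s'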
def human_file_size(size):
"""
Returns a human-friendly string representing a file size
that is 2-4 characters long.
For example, depending on the number of bytes given, can be one
of::
256b
64k
1.1G
Parameters
----------
size : int
The size of the file (in bytes)
Returns
-------
size : str
A human-friendly representation of the size of the file
"""
if hasattr(size, 'unit'):
# Import units only if necessary because the import takes a
# significant time [#4649]
from astropy import units as u
size = u.Quantity(size, u.byte).value
suffixes = ' kMGTPEZY'
if size == 0:
num_scale = 0
else:
num_scale = int(math.floor(math.log(size) / math.log(1000)))
if num_scale > 7:
suffix = '?'
else:
suffix = suffixes[num_scale]
num_scale = int(math.pow(1000, num_scale))
value = size / num_scale
str_value = str(value)
if suffix == ' ':
str_value = str_value[:str_value.index('.')]
elif str_value[2] == '.':
str_value = str_value[:2]
else:
str_value = str_value[:3]
return f"{str_value:>3s}{suffix}"
class _mapfunc:
"""
A function wrapper to support ProgressBar.map().
"""
def __init__(self, func):
self._func = func
def __call__(self, i_arg):
i, arg = i_arg
return i, self._func(arg)
class ProgressBar:
"""
A class to display a progress bar in the terminal.
It is designed to be used either with the ``with`` statement::
with ProgressBar(len(items)) as bar:
for item in enumerate(items):
bar.update()
or as a generator::
for item in ProgressBar(items):
item.process()
"""
def __init__(self, total_or_items, ipython_widget=False, file=None):
"""
Parameters
----------
total_or_items : int or sequence
If an int, the number of increments in the process being
tracked. If a sequence, the items to iterate over.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the progress bar will be
completely silent.
"""
if file is None:
file = _get_stdout()
if not ipython_widget and not isatty(file):
self.update = self._silent_update
self._silent = True
else:
self._silent = False
if isiterable(total_or_items):
self._items = iter(total_or_items)
self._total = len(total_or_items)
else:
try:
self._total = int(total_or_items)
except TypeError:
raise TypeError("First argument must be int or sequence")
else:
self._items = iter(range(self._total))
self._file = file
self._start_time = time.time()
self._human_total = human_file_size(self._total)
self._ipython_widget = ipython_widget
self._signal_set = False
if not ipython_widget:
self._should_handle_resize = (
_CAN_RESIZE_TERMINAL and self._file.isatty())
self._handle_resize()
if self._should_handle_resize:
signal.signal(signal.SIGWINCH, self._handle_resize)
self._signal_set = True
self.update(0)
def _handle_resize(self, signum=None, frame=None):
terminal_width = terminal_size(self._file)[1]
self._bar_length = terminal_width - 37
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._silent:
if exc_type is None:
self.update(self._total)
self._file.write('\n')
self._file.flush()
if self._signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL)
def __iter__(self):
return self
def __next__(self):
try:
rv = next(self._items)
except StopIteration:
self.__exit__(None, None, None)
raise
else:
self.update()
return rv
def update(self, value=None):
"""
Update progress bar via the console or notebook accordingly.
"""
# Update self.value
if value is None:
value = self._current_value + 1
self._current_value = value
# Choose the appropriate environment
if self._ipython_widget:
self._update_ipython_widget(value)
else:
self._update_console(value)
def _update_console(self, value=None):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
if self._total == 0:
frac = 1.0
else:
frac = float(value) / float(self._total)
file = self._file
write = file.write
if frac > 1:
bar_fill = int(self._bar_length)
else:
bar_fill = int(float(self._bar_length) * frac)
write('\r|')
color_print('=' * bar_fill, 'blue', file=file, end='')
if bar_fill < self._bar_length:
color_print('>', 'green', file=file, end='')
write('-' * (self._bar_length - bar_fill - 1))
write('|')
if value >= self._total:
t = time.time() - self._start_time
prefix = ' '
elif value <= 0:
t = None
prefix = ''
else:
t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
prefix = ' ETA '
write(f' {human_file_size(value):>4s}/{self._human_total:>4s}')
write(f' ({frac:>6.2%})')
write(prefix)
if t is not None:
write(human_time(t))
self._file.flush()
def _update_ipython_widget(self, value=None):
"""
Update the progress bar to the given value (out of a total
given to the constructor).
This method is for use in the IPython notebook 2+.
"""
# Create and display an empty progress bar widget,
# if none exists.
if not hasattr(self, '_widget'):
# Import only if an IPython widget, i.e., widget in iPython NB
from IPython import version_info
if version_info[0] < 4:
from IPython.html import widgets
self._widget = widgets.FloatProgressWidget()
else:
_IPython.get_ipython()
from ipywidgets import widgets
self._widget = widgets.FloatProgress()
from IPython.display import display
display(self._widget)
self._widget.value = 0
# Calculate percent completion, and update progress bar
frac = (value/self._total)
self._widget.value = frac * 100
self._widget.description = f' ({frac:>6.2%})'
def _silent_update(self, value=None):
pass
@classmethod
def map(cls, function, items, multiprocess=False, file=None, step=100,
ipython_widget=False, multiprocessing_start_method=None):
"""Map function over items while displaying a progress bar with percentage complete.
The map operation may run in arbitrary order on the items, but the results are
returned in sequential order.
::
def work(i):
print(i)
ProgressBar.map(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, int, optional
If `True`, use the `multiprocessing` module to distribute each task
to a different processor core. If a number greater than 1, then use
that number of cores.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any), the progress bar will
be completely silent.
step : int, optional
Update the progress bar at least every *step* steps (default: 100).
If ``multiprocess`` is `True`, this will affect the size
of the chunks of ``items`` that are submitted as separate tasks
to the process pool. A large step size may make the job
complete faster if ``items`` is very long.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
"""
if multiprocess:
function = _mapfunc(function)
items = list(enumerate(items))
results = cls.map_unordered(
function, items, multiprocess=multiprocess,
file=file, step=step,
ipython_widget=ipython_widget,
multiprocessing_start_method=multiprocessing_start_method)
if multiprocess:
_, results = zip(*sorted(results))
results = list(results)
return results
@classmethod
def map_unordered(cls, function, items, multiprocess=False, file=None,
step=100, ipython_widget=False,
multiprocessing_start_method=None):
"""Map function over items, reporting the progress.
Does a `map` operation while displaying a progress bar with
percentage complete. The map operation may run in arbitrary order
on the items, and the results may be returned in arbitrary order.
::
def work(i):
print(i)
ProgressBar.map_unordered(work, range(50))
Parameters
----------
function : function
Function to call for each step
items : sequence
Sequence where each element is a tuple of arguments to pass to
*function*.
multiprocess : bool, int, optional
If `True`, use the `multiprocessing` module to distribute each task
to a different processor core. If a number greater than 1, then use
that number of cores.
ipython_widget : bool, optional
If `True`, the progress bar will display as an IPython
notebook widget.
file : writable file-like, optional
The file to write the progress bar to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any), the progress bar will
be completely silent.
step : int, optional
Update the progress bar at least every *step* steps (default: 100).
If ``multiprocess`` is `True`, this will affect the size
of the chunks of ``items`` that are submitted as separate tasks
to the process pool. A large step size may make the job
complete faster if ``items`` is very long.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
"""
# concurrent.futures import here to avoid import failure when running
# in pyodide/Emscripten
from concurrent.futures import ProcessPoolExecutor, as_completed
results = []
if file is None:
file = _get_stdout()
with cls(len(items), ipython_widget=ipython_widget, file=file) as bar:
if bar._ipython_widget:
chunksize = step
else:
default_step = max(int(float(len(items)) / bar._bar_length), 1)
chunksize = min(default_step, step)
if not multiprocess or multiprocess < 1:
for i, item in enumerate(items):
results.append(function(item))
if (i % chunksize) == 0:
bar.update(i)
else:
ctx = multiprocessing.get_context(multiprocessing_start_method)
kwargs = dict(mp_context=ctx)
with ProcessPoolExecutor(
max_workers=(int(multiprocess)
if multiprocess is not True
else None),
**kwargs) as p:
for i, f in enumerate(
as_completed(
p.submit(function, item)
for item in items)):
bar.update(i)
results.append(f.result())
return results
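# Illustrative usage sketch (not part of the original module; ``_square`` is a
# hypothetical worker function). ``map`` preserves the input order of the
# results even when the work is distributed with ``multiprocess=True``.
def _square(x):
    return x * x

def _demo_progressbar_map():
    results = ProgressBar.map(_square, range(5), multiprocess=False)
    assert results == [0, 1, 4, 9, 16]
    return results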
class Spinner:
"""
A class to display a spinner in the terminal.
It is designed to be used with the ``with`` statement::
with Spinner("Reticulating splines", "green") as s:
for item in enumerate(items):
s.update()
"""
_default_unicode_chars = "◓◑◒◐"
_default_ascii_chars = "-/|\\"
def __init__(self, msg, color='default', file=None, step=1,
chars=None):
"""
Parameters
----------
msg : str
The message to print
color : str, optional
An ANSI terminal color name. Must be one of: black, red,
green, brown, blue, magenta, cyan, lightgrey, default,
darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white.
file : writable file-like, optional
The file to write the spinner to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the spinner will be
completely silent.
step : int, optional
Only update the spinner every *step* steps
chars : str, optional
The character sequence to use for the spinner
"""
if file is None:
file = _get_stdout()
self._msg = msg
self._color = color
self._file = file
self._step = step
if chars is None:
if conf.unicode_output:
chars = self._default_unicode_chars
else:
chars = self._default_ascii_chars
self._chars = chars
self._silent = not isatty(file)
if self._silent:
self._iter = self._silent_iterator()
else:
self._iter = self._iterator()
def _iterator(self):
chars = self._chars
index = 0
file = self._file
write = file.write
flush = file.flush
try_fallback = True
while True:
write('\r')
color_print(self._msg, self._color, file=file, end='')
write(' ')
try:
if try_fallback:
write = _write_with_fallback(chars[index], write, file)
else:
write(chars[index])
except UnicodeError:
# If even _write_with_fallback failed for any reason just give
# up on trying to use the unicode characters
chars = self._default_ascii_chars
write(chars[index])
try_fallback = False # No good will come of using this again
flush()
yield
for i in range(self._step):
yield
index = (index + 1) % len(chars)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
file = self._file
write = file.write
flush = file.flush
if not self._silent:
write('\r')
color_print(self._msg, self._color, file=file, end='')
if exc_type is None:
color_print(' [Done]', 'green', file=file)
else:
color_print(' [Failed]', 'red', file=file)
flush()
def __iter__(self):
return self
def __next__(self):
next(self._iter)
def update(self, value=None):
"""Update the spin wheel in the terminal.
Parameters
----------
value : int, optional
Ignored (present just for compatibility with `ProgressBar.update`).
"""
next(self)
def _silent_iterator(self):
color_print(self._msg, self._color, file=self._file, end='')
self._file.flush()
while True:
yield
class ProgressBarOrSpinner:
"""
A class that displays either a `ProgressBar` or `Spinner`
depending on whether the total size of the operation is
known or not.
It is designed to be used with the ``with`` statement::
if file.has_length():
length = file.get_length()
else:
length = None
bytes_read = 0
with ProgressBarOrSpinner(length) as bar:
while file.read(blocksize):
bytes_read += blocksize
bar.update(bytes_read)
"""
def __init__(self, total, msg, color='default', file=None):
"""
Parameters
----------
total : int or None
If an int, the number of increments in the process being
tracked and a `ProgressBar` is displayed. If `None`, a
`Spinner` is displayed.
msg : str
The message to display above the `ProgressBar` or
alongside the `Spinner`.
color : str, optional
The color of ``msg``, if any. Must be an ANSI terminal
color name. Must be one of: black, red, green, brown,
blue, magenta, cyan, lightgrey, default, darkgrey,
lightred, lightgreen, yellow, lightblue, lightmagenta,
lightcyan, white.
file : writable file-like, optional
The file to write to. Defaults to `sys.stdout`. If
``file`` is not a tty (as determined by calling its `isatty`
member, if any), only ``msg`` will be displayed: the
`ProgressBar` or `Spinner` will be silent.
"""
if file is None:
file = _get_stdout()
if total is None or not isatty(file):
self._is_spinner = True
self._obj = Spinner(msg, color=color, file=file)
else:
self._is_spinner = False
color_print(msg, color, file=file)
self._obj = ProgressBar(total, file=file)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return self._obj.__exit__(exc_type, exc_value, traceback)
def update(self, value):
"""
Update the progress bar to the given value (out of the total
given to the constructor).
"""
self._obj.update(value)
def print_code_line(line, col=None, file=None, tabwidth=8, width=70):
"""
Prints a line of source code, highlighting a particular character
position in the line. Useful for displaying the context of error
messages.
If the line is more than ``width`` characters, the line is truncated
accordingly and '…' characters are inserted at the front and/or
end.
It looks like this::
there_is_a_syntax_error_here :
^
Parameters
----------
line : unicode
The line of code to display
col : int, optional
The character in the line to highlight. ``col`` must be less
than ``len(line)``.
file : writable file-like, optional
Where to write to. Defaults to `sys.stdout`.
tabwidth : int, optional
The number of spaces per tab (``'\\t'``) character. Default
is 8. All tabs will be converted to spaces to ensure that the
caret lines up with the correct column.
width : int, optional
The width of the display, beyond which the line will be
truncated. Defaults to 70 (this matches the default in the
standard library's `textwrap` module).
"""
if file is None:
file = _get_stdout()
if conf.unicode_output:
ellipsis = '…'
else:
ellipsis = '...'
write = file.write
if col is not None:
if col >= len(line):
raise ValueError('col must be less than the line length.')
ntabs = line[:col].count('\t')
col += ntabs * (tabwidth - 1)
line = line.rstrip('\n')
line = line.replace('\t', ' ' * tabwidth)
if col is not None and col > width:
new_col = min(width // 2, len(line) - col)
offset = col - new_col
line = line[offset + len(ellipsis):]
width -= len(ellipsis)
new_col = col
col -= offset
color_print(ellipsis, 'darkgrey', file=file, end='')
if len(line) > width:
write(line[:width - len(ellipsis)])
color_print(ellipsis, 'darkgrey', file=file)
else:
write(line)
write('\n')
if col is not None:
write(' ' * col)
color_print('^', 'red', file=file)
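# Illustrative usage sketch (not part of the original module; the source line
# is hypothetical): print a line of code with a caret under column 9.
def _demo_print_code_line():
    print_code_line('result = undefined_name + 1', col=9)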
# The following four Getch* classes implement unbuffered character reading from
# stdin on Windows, linux, MacOSX. This is taken directly from ActiveState
# Code Recipes:
# http://code.activestate.com/recipes/134892-getch-like-unbuffered-character-reading-from-stdin/
#
class Getch:
"""Get a single character from standard input without screen echo.
Returns
-------
char : str (one character)
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchMacCarbon()
except (ImportError, AttributeError):
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
class _GetchUnix:
def __init__(self):
import tty # pylint: disable=W0611
import sys # pylint: disable=W0611
# import termios now or else you'll get the Unix
# version on the Mac
import termios # pylint: disable=W0611
def __call__(self):
import sys
import tty
import termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt # pylint: disable=W0611
def __call__(self):
import msvcrt
return msvcrt.getch()
class _GetchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
Carbon.Evt # see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask
return ''
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg & 0x000000FF)
|
b0940860684cadd4f41ed6a0a504e46249a6f597212fab4b4b592ce0e4f46886 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions related to Python runtime introspection."""
import collections
import inspect
import os
import sys
import types
import importlib
from importlib import metadata
from packaging.version import Version
from astropy.utils.decorators import deprecated_renamed_argument
__all__ = ['resolve_name', 'minversion', 'find_current_module',
'isinstancemethod']
__doctest_skip__ = ['find_current_module']
if sys.version_info[:2] >= (3, 10):
from importlib.metadata import packages_distributions
else:
def packages_distributions():
"""
Return a mapping of top-level packages to their distributions.
Note: copied from https://github.com/python/importlib_metadata/pull/287
"""
pkg_to_dist = collections.defaultdict(list)
for dist in metadata.distributions():
for pkg in (dist.read_text('top_level.txt') or '').split():
pkg_to_dist[pkg].append(dist.metadata['Name'])
return dict(pkg_to_dist)
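# Illustrative usage sketch (not part of the original module): look up which
# distribution(s) provide an importable top-level package (e.g. ``numpy``).
def _demo_packages_distributions():
    dists = packages_distributions()
    return dists.get('numpy', [])   # typically ['numpy'] when it is installed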
def resolve_name(name, *additional_parts):
"""Resolve a name like ``module.object`` to an object and return it.
This ends up working like ``from module import object`` but is easier
to deal with than the `__import__` builtin and supports digging into
submodules.
Parameters
----------
name : `str`
A dotted path to a Python object--that is, the name of a function,
class, or other object in a module with the full path to that module,
including parent modules, separated by dots. Also known as the fully
qualified name of the object.
additional_parts : iterable, optional
If more than one positional argument is given, those arguments are
automatically dotted together with ``name``.
Examples
--------
>>> resolve_name('astropy.utils.introspection.resolve_name')
<function resolve_name at 0x...>
>>> resolve_name('astropy', 'utils', 'introspection', 'resolve_name')
<function resolve_name at 0x...>
Raises
------
`ImportError`
If the module or named object is not found.
"""
additional_parts = '.'.join(additional_parts)
if additional_parts:
name = name + '.' + additional_parts
parts = name.split('.')
if len(parts) == 1:
# No dots in the name--just a straight up module import
cursor = 1
fromlist = []
else:
cursor = len(parts) - 1
fromlist = [parts[-1]]
module_name = parts[:cursor]
while cursor > 0:
try:
ret = __import__('.'.join(module_name), fromlist=fromlist)
break
except ImportError:
if cursor == 0:
raise
cursor -= 1
module_name = parts[:cursor]
fromlist = [parts[cursor]]
ret = ''
for part in parts[cursor:]:
try:
ret = getattr(ret, part)
except AttributeError:
raise ImportError(name)
return ret
@deprecated_renamed_argument('version_path', None, '5.0')
def minversion(module, version, inclusive=True, version_path='__version__'):
"""
Returns `True` if the specified Python module satisfies a minimum version
requirement, and `False` if not.
.. deprecated::
``version_path`` is not used anymore and is deprecated in
``astropy`` 5.0.
Parameters
----------
module : module or `str`
An imported module of which to check the version, or the name of
that module (in which case an import of that module is attempted--
if this fails `False` is returned).
version : `str`
The version as a string that this module must have at a minimum (e.g.
``'0.12'``).
inclusive : `bool`
The specified version meets the requirement inclusively (i.e. ``>=``)
as opposed to strictly greater than (default: `True`).
Examples
--------
>>> import astropy
>>> minversion(astropy, '0.4.4')
True
"""
if isinstance(module, types.ModuleType):
module_name = module.__name__
module_version = getattr(module, '__version__', None)
elif isinstance(module, str):
module_name = module
module_version = None
try:
module = resolve_name(module_name)
except ImportError:
return False
else:
raise ValueError('module argument must be an actual imported '
'module, or the import name of the module; '
f'got {repr(module)}')
if module_version is None:
try:
module_version = metadata.version(module_name)
except metadata.PackageNotFoundError:
# Maybe the distribution name is different from package name.
# Calling packages_distributions is costly so we do it only
# if necessary, as only a few packages don't have the same
# distribution name.
dist_names = packages_distributions()
module_version = metadata.version(dist_names[module_name][0])
if inclusive:
return Version(module_version) >= Version(version)
else:
return Version(module_version) > Version(version)
def find_current_module(depth=1, finddiff=False):
"""
Determines the module/package from which this function is called.
This function has two modes, determined by the ``finddiff`` option. It
will either simply go the requested number of frames up the call
stack (if ``finddiff`` is False), or it will go up the call stack until
it reaches a module that is *not* in a specified set.
Parameters
----------
depth : int
Specifies how far back to go in the call stack (0-indexed, so that
passing in 0 gives back `astropy.utils.misc`).
finddiff : bool or list
If False, the returned ``mod`` will just be ``depth`` frames up from
the current frame. Otherwise, the function will start at a frame
``depth`` up from current, and continue up the call stack to the
first module that is *different* from those in the provided list.
In this case, ``finddiff`` can be a list of modules or modules
names. Alternatively, it can be True, which will use the module
``depth`` call stack frames up as the module the returned module
must be different from.
Returns
-------
mod : module or None
The module object or None if the package cannot be found. The name of
the module is available as the ``__name__`` attribute of the returned
object (if it isn't None).
Raises
------
ValueError
If ``finddiff`` is a list with an invalid entry.
Examples
--------
The examples below assume that there are two modules in a package named
``pkg``. ``mod1.py``::
def find1():
from astropy.utils import find_current_module
print(find_current_module(1).__name__)
def find2():
from astropy.utils import find_current_module
cmod = find_current_module(2)
if cmod is None:
print('None')
else:
print(cmod.__name__)
def find_diff():
from astropy.utils import find_current_module
print(find_current_module(0, True).__name__)
``mod2.py``::
def find():
from .mod1 import find2
find2()
With these modules in place, the following occurs::
>>> from pkg import mod1, mod2
>>> from astropy.utils import find_current_module
>>> mod1.find1()
pkg.mod1
>>> mod1.find2()
None
>>> mod2.find()
pkg.mod2
>>> find_current_module(0)
<module 'astropy.utils.misc' from 'astropy/utils/misc.py'>
>>> mod1.find_diff()
pkg.mod1
"""
frm = inspect.currentframe()
for i in range(depth):
frm = frm.f_back
if frm is None:
return None
if finddiff:
currmod = _get_module_from_frame(frm)
if finddiff is True:
diffmods = [currmod]
else:
diffmods = []
for fd in finddiff:
if inspect.ismodule(fd):
diffmods.append(fd)
elif isinstance(fd, str):
diffmods.append(importlib.import_module(fd))
elif fd is True:
diffmods.append(currmod)
else:
raise ValueError('invalid entry in finddiff')
while frm:
frmb = frm.f_back
modb = _get_module_from_frame(frmb)
if modb not in diffmods:
return modb
frm = frmb
else:
return _get_module_from_frame(frm)
def _get_module_from_frame(frm):
"""Uses inspect.getmodule() to get the module that the current frame's
code is running in.
However, this does not work reliably for code imported from a zip file,
so this provides a fallback mechanism for that case which is less
reliable in general, but more reliable than inspect.getmodule() for this
particular case.
"""
mod = inspect.getmodule(frm)
if mod is not None:
return mod
# Check to see if we're importing from a bundle file. First ensure that
# __file__ is available in globals; this is cheap to check to bail out
# immediately if this fails
if '__file__' in frm.f_globals and '__name__' in frm.f_globals:
filename = frm.f_globals['__file__']
# Using __file__ from the frame's globals and getting it into the form
# of an absolute path name with .py at the end works pretty well for
# looking up the module using the same means as inspect.getmodule
if filename[-4:].lower() in ('.pyc', '.pyo'):
filename = filename[:-4] + '.py'
filename = os.path.realpath(os.path.abspath(filename))
if filename in inspect.modulesbyfile:
return sys.modules.get(inspect.modulesbyfile[filename])
# On Windows, inspect.modulesbyfile appears to have filenames stored
# in lowercase, so we check for this case too.
if filename.lower() in inspect.modulesbyfile:
return sys.modules.get(inspect.modulesbyfile[filename.lower()])
# Otherwise there are still some even trickier things that might make it
# possible to track down the module, but we'll leave those out unless we find
# a case where it's really necessary. So return None if the module is not found.
return None
def find_mod_objs(modname, onlylocals=False):
""" Returns all the public attributes of a module referenced by name.
.. note::
The returned list does *not* include subpackages or modules of
``modname``, nor does it include private attributes (those that
begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool or list of str
If `True`, only attributes that are either members of ``modname`` OR
one of its modules or subpackages will be included. If it is a list
of strings, those specify the possible packages that will be
considered "local".
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module ``modname`` .
fqnames : list of str
A list of the full qualified names of the attributes (e.g.,
``astropy.utils.introspection.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for functions or
classes it can be different if they are actually defined elsewhere and
just referenced in ``modname``.
objs : list of objects
A list of the actual attributes themselves (in the same order as
the other arguments)
"""
mod = resolve_name(modname)
if hasattr(mod, '__all__'):
pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
else:
pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']
# filter out modules and pull the names and objs out
ismodule = inspect.ismodule
localnames = [k for k, v in pkgitems if not ismodule(v)]
objs = [v for k, v in pkgitems if not ismodule(v)]
# fully qualified names can be determined from the object's module
fqnames = []
for obj, lnm in zip(objs, localnames):
if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
fqnames.append(obj.__module__ + '.' + obj.__name__)
else:
fqnames.append(modname + '.' + lnm)
if onlylocals:
if onlylocals is True:
onlylocals = [modname]
valids = [any(fqn.startswith(nm) for nm in onlylocals) for fqn in fqnames]
localnames = [e for i, e in enumerate(localnames) if valids[i]]
fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
objs = [e for i, e in enumerate(objs) if valids[i]]
return localnames, fqnames, objs
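# Illustrative usage sketch (not part of the original module): list the public
# names defined locally in the ``math`` module, with their qualified names.
def _demo_find_mod_objs():
    localnames, fqnames, objs = find_mod_objs('math', onlylocals=True)
    assert 'sqrt' in localnames and 'math.sqrt' in fqnames
    return localnames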
# Note: I would have preferred to call this is_instancemethod, but this naming is
# for consistency with other functions in the `inspect` module
def isinstancemethod(cls, obj):
"""
Returns `True` if the given object is an instance method of the class
it is defined on (as opposed to a `staticmethod` or a `classmethod`).
This requires both the class the object is a member of as well as the
object itself in order to make this determination.
Parameters
----------
cls : `type`
The class on which this method was defined.
obj : `object`
A member of the provided class (the membership is not checked directly,
but this function will always return `False` if the given object is not
a member of the given class).
Examples
--------
>>> class MetaClass(type):
... def a_classmethod(cls): pass
...
>>> class MyClass(metaclass=MetaClass):
... def an_instancemethod(self): pass
...
... @classmethod
... def another_classmethod(cls): pass
...
... @staticmethod
... def a_staticmethod(): pass
...
>>> isinstancemethod(MyClass, MyClass.a_classmethod)
False
>>> isinstancemethod(MyClass, MyClass.another_classmethod)
False
>>> isinstancemethod(MyClass, MyClass.a_staticmethod)
False
>>> isinstancemethod(MyClass, MyClass.an_instancemethod)
True
"""
return _isinstancemethod(cls, obj)
def _isinstancemethod(cls, obj):
if not isinstance(obj, types.FunctionType):
return False
# Unfortunately it seems the easiest way to get to the original
# staticmethod object is to look in the class's __dict__, though we
# also need to look up the MRO in case the method is not in the given
# class's dict
name = obj.__name__
for basecls in cls.mro(): # This includes cls
if name in basecls.__dict__:
return not isinstance(basecls.__dict__[name], staticmethod)
# This shouldn't happen, though this is the most sensible response if
# it does.
raise AttributeError(name)
|
a523aaf8cdaea28e9c8c33ebc316f60b4f9fa0622b9fb7bcfd6549910835ac53 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains helper functions and classes for handling metadata.
"""
from functools import wraps
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from copy import deepcopy
import numpy as np
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import dtype_bytes_or_chars
__all__ = ['MergeConflictError', 'MergeConflictWarning', 'MERGE_STRATEGIES',
'common_dtype', 'MergePlus', 'MergeNpConcatenate', 'MergeStrategy',
'MergeStrategyMeta', 'enable_merge_strategies', 'merge', 'MetaData',
'MetaAttribute']
class MergeConflictError(TypeError):
pass
class MergeConflictWarning(AstropyWarning):
pass
MERGE_STRATEGIES = []
def common_dtype(arrs):
"""
Use numpy to find the common dtype for a list of ndarrays.
Only allow arrays within the following fundamental numpy data types:
``np.bool_``, ``np.object_``, ``np.number``, ``np.character``, ``np.void``
Parameters
----------
arrs : list of ndarray
Arrays for which to find the common dtype
Returns
-------
dtype_str : str
String representation of dtype (dtype ``str`` attribute)
"""
def dtype(arr):
return getattr(arr, 'dtype', np.dtype('O'))
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = {tuple(issubclass(dtype(arr).type, np_type) for np_type in np_types)
for arr in arrs}
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [dtype(arr).name for arr in arrs]
tme = MergeConflictError(f'Arrays have incompatible types {incompat_types}')
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=dtype(arr)) for arr in arrs]
# For string-type arrays we need to explicitly fill in non-zero
# values, or the final ``arr_common = ...`` step is unpredictable.
for i, arr in enumerate(arrs):
if arr.dtype.kind in ('S', 'U'):
arrs[i] = [('0' if arr.dtype.kind == 'U' else b'0') *
dtype_bytes_or_chars(arr.dtype)]
arr_common = np.array([arr[0] for arr in arrs])
return arr_common.dtype.str if arr_common.dtype.names is None else arr_common.dtype.descr
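# Illustrative usage sketch (not part of the original module): compatible
# numeric arrays yield a common dtype string, while mixing numeric and
# character arrays raises MergeConflictError.
def _demo_common_dtype():
    dt = common_dtype([np.array([1, 2]), np.array([3.0])])   # e.g. '<f8'
    try:
        common_dtype([np.array([1]), np.array(['a'])])
    except MergeConflictError:
        pass
    return dt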
class MergeStrategyMeta(type):
"""
Metaclass that registers MergeStrategy subclasses into the
MERGE_STRATEGIES registry.
"""
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
# Wrap ``merge`` classmethod to catch any exception and re-raise as
# MergeConflictError.
if 'merge' in members and isinstance(members['merge'], classmethod):
orig_merge = members['merge'].__func__
@wraps(orig_merge)
def merge(cls, left, right):
try:
return orig_merge(cls, left, right)
except Exception as err:
raise MergeConflictError(err)
cls.merge = classmethod(merge)
# Register merging class (except for base MergeStrategy class)
if 'types' in members:
types = members['types']
if isinstance(types, tuple):
types = [types]
for left, right in reversed(types):
MERGE_STRATEGIES.insert(0, (left, right, cls))
return cls
class MergeStrategy(metaclass=MergeStrategyMeta):
"""
Base class for defining a strategy for merging metadata from two
sources, left and right, into a single output.
The primary functionality for the class is the ``merge(cls, left, right)``
class method. This takes ``left`` and ``right`` side arguments and
returns a single merged output.
The first class attribute is ``types``. This is defined as a list of
(left_types, right_types) tuples that indicate for which input types the
merge strategy applies. In determining whether to apply this merge
strategy to a pair of (left, right) objects, a test is done:
``isinstance(left, left_types) and isinstance(right, right_types)``. For
example::
types = [(np.ndarray, np.ndarray), # Two ndarrays
(np.ndarray, (list, tuple)), # ndarray and (list or tuple)
((list, tuple), np.ndarray)] # (list or tuple) and ndarray
As a convenience, ``types`` can be defined as a single two-tuple instead of
a list of two-tuples, e.g. ``types = (np.ndarray, np.ndarray)``.
The other class attribute is ``enabled``, which defaults to ``False`` in
the base class. By defining a subclass of ``MergeStrategy`` the new merge
strategy is automatically registered to be available for use in
merging. However, by default the new merge strategy is *not enabled*. This
prevents inadvertently changing the behavior of unrelated code that is
performing metadata merge operations.
In most cases (particularly in library code that others might use) it is
recommended to leave custom strategies disabled and use the
`~astropy.utils.metadata.enable_merge_strategies` context manager to locally
enable the desired strategies. However, if one is confident that the
new strategy will not produce unexpected behavior, then one can globally
enable it by setting the ``enabled`` class attribute to ``True``.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), (int, float)) # (left_types, right_types)
...
... @classmethod
... def merge(cls, left, right):
... return [left, right]
"""
# Set ``enabled = True`` to globally enable applying this merge strategy.
# This is not generally recommended.
enabled = False
# types = [(left_types, right_types), ...]
class MergePlus(MergeStrategy):
"""
Merge ``left`` and ``right`` objects using the plus operator. This
merge strategy is globally enabled by default.
"""
types = [(list, list), (tuple, tuple)]
enabled = True
@classmethod
def merge(cls, left, right):
return left + right
class MergeNpConcatenate(MergeStrategy):
"""
Merge ``left`` and ``right`` objects using np.concatenate. This
merge strategy is globally enabled by default.
This will upcast a list or tuple to np.ndarray and the output is
always ndarray.
"""
types = [(np.ndarray, np.ndarray),
(np.ndarray, (list, tuple)),
((list, tuple), np.ndarray)]
enabled = True
@classmethod
def merge(cls, left, right):
left, right = np.asanyarray(left), np.asanyarray(right)
common_dtype([left, right]) # Ensure left and right have compatible dtype
return np.concatenate([left, right])
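# Illustrative sketch of the two strategies above, which are enabled by default
# and used by the ``merge`` function below (lists/tuples concatenate with ``+``,
# array-like values via np.concatenate):
#
#     >>> import numpy as np
#     >>> MergePlus.merge([1, 2], [3])
#     [1, 2, 3]
#     >>> MergeNpConcatenate.merge(np.array([1, 2]), [3, 4])
#     array([1, 2, 3, 4])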
def _both_isinstance(left, right, cls):
return isinstance(left, cls) and isinstance(right, cls)
def _not_equal(left, right):
try:
return bool(left != right)
except Exception:
return True
class _EnableMergeStrategies:
def __init__(self, *merge_strategies):
self.merge_strategies = merge_strategies
self.orig_enabled = {}
for left_type, right_type, merge_strategy in MERGE_STRATEGIES:
if issubclass(merge_strategy, merge_strategies):
self.orig_enabled[merge_strategy] = merge_strategy.enabled
merge_strategy.enabled = True
def __enter__(self):
pass
def __exit__(self, type, value, tb):
for merge_strategy, enabled in self.orig_enabled.items():
merge_strategy.enabled = enabled
def enable_merge_strategies(*merge_strategies):
"""
Context manager to temporarily enable one or more custom metadata merge
strategies.
Examples
--------
Here we define a custom merge strategy that takes an int or float on
the left and right sides and returns a list with the two values.
>>> from astropy.utils.metadata import MergeStrategy
>>> class MergeNumbersAsList(MergeStrategy):
... types = ((int, float), # left side types
... (int, float)) # right side types
... @classmethod
... def merge(cls, left, right):
... return [left, right]
By defining this class the merge strategy is automatically registered to be
available for use in merging. However, by default new merge strategies are
*not enabled*. This prevents inadvertently changing the behavior of
unrelated code that is performing metadata merge operations.
In order to use the new merge strategy, use this context manager as in the
following example::
>>> from astropy.table import Table, vstack
>>> from astropy.utils.metadata import enable_merge_strategies
>>> t1 = Table([[1]], names=['a'])
>>> t2 = Table([[2]], names=['a'])
>>> t1.meta = {'m': 1}
>>> t2.meta = {'m': 2}
>>> with enable_merge_strategies(MergeNumbersAsList):
... t12 = vstack([t1, t2])
>>> t12.meta['m']
[1, 2]
One can supply further merge strategies as additional arguments to the
context manager.
As a convenience, the enabling operation is actually done by checking
whether the registered strategies are subclasses of the context manager
arguments. This means one can define a related set of merge strategies and
then enable them all at once by enabling the base class. As a trivial
example, *all* registered merge strategies can be enabled with::
>>> with enable_merge_strategies(MergeStrategy):
... t12 = vstack([t1, t2])
Parameters
----------
*merge_strategies : `~astropy.utils.metadata.MergeStrategy`
Merge strategies that will be enabled.
"""
return _EnableMergeStrategies(*merge_strategies)
def _warn_str_func(key, left, right):
out = ('Cannot merge meta key {0!r} types {1!r}'
' and {2!r}, choosing {0}={3!r}'
.format(key, type(left), type(right), right))
return out
def _error_str_func(key, left, right):
out = f'Cannot merge meta key {key!r} types {type(left)!r} and {type(right)!r}'
return out
def merge(left, right, merge_func=None, metadata_conflicts='warn',
warn_str_func=_warn_str_func,
error_str_func=_error_str_func):
"""
Merge the ``left`` and ``right`` metadata objects.
This is a simplistic and limited implementation at this point.
"""
if not _both_isinstance(left, right, dict):
raise MergeConflictError('Can only merge two dict-based objects')
out = deepcopy(left)
for key, val in right.items():
# If no conflict then insert val into out dict and continue
if key not in out:
out[key] = deepcopy(val)
continue
# There is a conflict that must be resolved
if _both_isinstance(left[key], right[key], dict):
out[key] = merge(left[key], right[key], merge_func,
metadata_conflicts=metadata_conflicts)
else:
try:
if merge_func is None:
for left_type, right_type, merge_cls in MERGE_STRATEGIES:
if not merge_cls.enabled:
continue
if (isinstance(left[key], left_type) and
isinstance(right[key], right_type)):
out[key] = merge_cls.merge(left[key], right[key])
break
else:
raise MergeConflictError
else:
out[key] = merge_func(left[key], right[key])
except MergeConflictError:
# Pick the metadata item that is not None. If both are not None,
# then if they are equal there is no conflict, and if they are
# different there is a conflict and we pick the one on the right
# (or warn, or raise an error, depending on metadata_conflicts).
if left[key] is None:
# This may not seem necessary since out[key] gets set to
# right[key], but not all objects support != which is
# needed for one of the if clauses.
out[key] = right[key]
elif right[key] is None:
out[key] = left[key]
elif _not_equal(left[key], right[key]):
if metadata_conflicts == 'warn':
warnings.warn(warn_str_func(key, left[key], right[key]),
MergeConflictWarning)
elif metadata_conflicts == 'error':
raise MergeConflictError(error_str_func(key, left[key], right[key]))
elif metadata_conflicts != 'silent':
raise ValueError('metadata_conflicts argument must be one '
'of "silent", "warn", or "error"')
out[key] = right[key]
else:
out[key] = right[key]
return out
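# Illustrative sketch of the default dict merging above: keys unique to either
# side are kept, equal values are not a conflict, and matching enabled
# strategies (e.g. MergePlus for two lists) combine the values:
#
#     >>> merge({'a': 1, 'b': [1]}, {'b': [2], 'c': 3})
#     {'a': 1, 'b': [1, 2], 'c': 3}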
class MetaData:
"""
A descriptor for classes that have a ``meta`` property.
This can be set to any valid `~collections.abc.Mapping`.
Parameters
----------
doc : `str`, optional
Documentation for the attribute of the class.
Default is ``""``.
.. versionadded:: 1.2
copy : `bool`, optional
If ``True`` the value is deepcopied before setting, otherwise it
is saved as a reference.
Default is ``True``.
.. versionadded:: 1.2
"""
def __init__(self, doc="", copy=True):
self.__doc__ = doc
self.copy = copy
def __get__(self, instance, owner):
if instance is None:
return self
if not hasattr(instance, '_meta'):
instance._meta = OrderedDict()
return instance._meta
def __set__(self, instance, value):
if value is None:
instance._meta = OrderedDict()
else:
if isinstance(value, Mapping):
if self.copy:
instance._meta = deepcopy(value)
else:
instance._meta = value
else:
raise TypeError("meta attribute must be dict-like")
class MetaAttribute:
"""
Descriptor to define custom attribute which gets stored in the object
``meta`` dict and can have a defined default.
This descriptor is intended to provide a convenient way to add attributes
to a subclass of a complex class such as ``Table`` or ``NDData``.
This requires that the object has an attribute ``meta`` which is a
dict-like object. The value of the MetaAttribute will be stored in a
new dict meta['__attributes__'] that is created when required.
Classes that define MetaAttributes are encouraged to support initializing
the attributes via the class ``__init__``. For example::
for attr in list(kwargs):
descr = getattr(self.__class__, attr, None)
if isinstance(descr, MetaAttribute):
setattr(self, attr, kwargs.pop(attr))
The name of a ``MetaAttribute`` cannot be the same as any of the following:
- Keyword argument in the owner class ``__init__``
- Method or attribute of the "parent class", where the parent class is
taken to be ``owner.__mro__[1]``.
:param default: default value
"""
def __init__(self, default=None):
self.default = default
def __get__(self, instance, owner):
# When called without an instance, return self to allow access
# to descriptor attributes.
if instance is None:
return self
# If default is None and the value has not been set already then return
# None without touching meta['__attributes__'] at all. This helps e.g.
# with the Table._hidden_columns attribute so it doesn't always
# auto-create meta['__attributes__'].
if (self.default is None
and self.name not in instance.meta.get('__attributes__', {})):
return None
# Get the __attributes__ dict and create if not there already.
attributes = instance.meta.setdefault('__attributes__', {})
try:
value = attributes[self.name]
except KeyError:
if self.default is not None:
attributes[self.name] = deepcopy(self.default)
# Return either specified default or None
value = attributes.get(self.name)
return value
def __set__(self, instance, value):
# Get the __attributes__ dict and create if not there already.
attributes = instance.meta.setdefault('__attributes__', {})
attributes[self.name] = value
def __delete__(self, instance):
# Remove this attribute from meta['__attributes__'] if it exists.
if '__attributes__' in instance.meta:
attrs = instance.meta['__attributes__']
if self.name in attrs:
del attrs[self.name]
# If this was the last attribute then remove the meta key as well
if not attrs:
del instance.meta['__attributes__']
def __set_name__(self, owner, name):
import inspect
params = [param.name for param in inspect.signature(owner).parameters.values()
if param.kind not in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL)]
# Reject names from existing params or best guess at parent class
if name in params or hasattr(owner.__mro__[1], name):
raise ValueError(f'{name} not allowed as {self.__class__.__name__}')
self.name = name
def __repr__(self):
return f'<{self.__class__.__name__} name={self.name} default={self.default}>'
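# Illustrative sketch of MetaAttribute on a hypothetical Table subclass (the
# value is stored under meta['__attributes__'] as described above):
#
#     >>> from astropy.table import Table
#     >>> class MyTable(Table):
#     ...     identifier = MetaAttribute(default='unknown')
#     >>> t = MyTable([[1, 2]])
#     >>> t.identifier
#     'unknown'
#     >>> t.identifier = 'run-42'
#     >>> t.meta['__attributes__']['identifier']
#     'run-42'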
|
850a9b3251b525cc3c16e0b6ee40ecf7a0818be91cdb4e856e0e269cc7d0ca25 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains functions and methods that relate to the DataInfo class
which provides a container for informational attributes as well as summary info
methods.
A DataInfo object is attached to the Quantity, SkyCoord, and Time classes in
astropy. Here it allows those classes to be used in Tables and uniformly carry
table column attributes such as name, format, dtype, meta, and description.
"""
# Note: these functions and classes are tested extensively in astropy table
# tests via their use in providing mixin column info, and in
# astropy/tests/test_info for providing table and column info summary data.
import os
import re
import sys
import weakref
import warnings
from io import StringIO
from copy import deepcopy
from functools import partial
from collections import OrderedDict
from contextlib import contextmanager
import numpy as np
from . import metadata
__all__ = ['data_info_factory', 'dtype_info_name', 'BaseColumnInfo',
'DataInfo', 'MixinInfo', 'ParentDtypeInfo']
# Tuple of filterwarnings kwargs to ignore when calling info
IGNORE_WARNINGS = (dict(category=RuntimeWarning, message='All-NaN|'
'Mean of empty slice|Degrees of freedom <= 0|'
'invalid value encountered in sqrt'),)
@contextmanager
def serialize_context_as(context):
"""Set context for serialization.
This will allow downstream code to understand the context in which a column
is being serialized. Objects like Time or SkyCoord will have different
default serialization representations depending on context.
Parameters
----------
context : str
Context name, e.g. 'fits', 'hdf5', 'parquet', 'ecsv', 'yaml'
"""
old_context = BaseColumnInfo._serialize_context
BaseColumnInfo._serialize_context = context
try:
yield
finally:
BaseColumnInfo._serialize_context = old_context
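# Illustrative sketch: downstream serialization code reads the context from
# BaseColumnInfo._serialize_context while the block is active, and the previous
# value (None by default) is restored afterwards:
#
#     >>> with serialize_context_as('fits'):
#     ...     BaseColumnInfo._serialize_context
#     'fits'
#     >>> BaseColumnInfo._serialize_context is None
#     True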
def dtype_info_name(dtype):
"""Return a human-oriented string name of the ``dtype`` arg.
This can be used by astropy methods that present type information about
a data object.
The output is mostly equivalent to ``dtype.name`` which takes the form
<type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an
optional number of bits which gets included only for numeric types.
The output is shown below for ``bytes`` and ``str`` types, with <N> being
the number of characters. This representation corresponds to the Python
type that matches the dtype::
Numpy    S<N>       U<N>
Python   bytes<N>   str<N>
Parameters
----------
dtype : str, `~numpy.dtype`, type
Input as an object that can be converted via :class:`numpy.dtype`.
Returns
-------
dtype_info_name : str
String name of ``dtype``
"""
dtype = np.dtype(dtype)
if dtype.names is not None:
return '({})'.format(', '.join(dtype_info_name(dt[0])
for dt in dtype.fields.values()))
if dtype.subdtype is not None:
dtype, shape = dtype.subdtype
else:
shape = ()
if dtype.kind in ('S', 'U'):
type_name = 'bytes' if dtype.kind == 'S' else 'str'
length = re.search(r'(\d+)', dtype.str).group(1)
out = type_name + length
else:
out = dtype.name
if shape:
out += f"[{','.join(str(n) for n in shape)}]"
return out
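# Illustrative sketch of the naming scheme above (string/bytes names carry the
# character length; numeric names come straight from ``dtype.name``):
#
#     >>> import numpy as np
#     >>> dtype_info_name('U4')
#     'str4'
#     >>> dtype_info_name(np.dtype('S3'))
#     'bytes3'
#     >>> dtype_info_name('i8')
#     'int64'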
def data_info_factory(names, funcs):
"""
Factory to create a function that can be used as an ``option``
for outputting data object summary information.
Examples
--------
>>> from astropy.utils.data_info import data_info_factory
>>> import numpy as np
>>> from astropy.table import Column
>>> c = Column([4., 3., 2., 1.])
>>> mystats = data_info_factory(names=['min', 'median', 'max'],
... funcs=[np.min, np.median, np.max])
>>> c.info(option=mystats)
min = 1
median = 2.5
max = 4
n_bad = 0
length = 4
Parameters
----------
names : list
List of information attribute names
funcs : list
List of functions that compute the corresponding information attribute
Returns
-------
func : function
Function that can be used as a data info option
"""
def func(dat):
outs = []
for name, func in zip(names, funcs):
try:
if isinstance(func, str):
out = getattr(dat, func)()
else:
out = func(dat)
except Exception:
outs.append('--')
else:
try:
outs.append(f'{out:g}')
except (TypeError, ValueError):
outs.append(str(out))
return OrderedDict(zip(names, outs))
return func
def _get_obj_attrs_map(obj, attrs):
"""
Get the values for object ``attrs`` and return as a dict. This
ignores any attributes that are None. In the context of serializing
the supported core astropy classes this conversion will succeed and
results in more succinct and less python-specific YAML.
"""
out = {}
for attr in attrs:
val = getattr(obj, attr, None)
if val is not None:
out[attr] = val
return out
def _get_data_attribute(dat, attr=None):
"""
Get a data object attribute for the ``attributes`` info summary method
"""
if attr == 'class':
val = type(dat).__name__
elif attr == 'dtype':
val = dtype_info_name(dat.info.dtype)
elif attr == 'shape':
datshape = dat.shape[1:]
val = datshape if datshape else ''
else:
val = getattr(dat.info, attr)
if val is None:
val = ''
return str(val)
class InfoAttribute:
def __init__(self, attr, default=None):
self.attr = attr
self.default = default
def __get__(self, instance, owner_cls):
if instance is None:
return self
return instance._attrs.get(self.attr, self.default)
def __set__(self, instance, value):
if instance is None:
# This is an unbound descriptor on the class
raise ValueError('cannot set unbound descriptor')
instance._attrs[self.attr] = value
class ParentAttribute:
def __init__(self, attr):
self.attr = attr
def __get__(self, instance, owner_cls):
if instance is None:
return self
return getattr(instance._parent, self.attr)
def __set__(self, instance, value):
if instance is None:
# This is an unbound descriptor on the class
raise ValueError('cannot set unbound descriptor')
setattr(instance._parent, self.attr, value)
class DataInfoMeta(type):
def __new__(mcls, name, bases, dct):
# Ensure that we do not gain a __dict__, which would mean
# arbitrary attributes could be set.
dct.setdefault('__slots__', [])
return super().__new__(mcls, name, bases, dct)
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
# Define default getters/setters for attributes, if needed.
for attr in cls.attr_names:
if attr not in dct:
# If not defined explicitly for this class, did any of
# its superclasses define it, and, if so, was this an
# automatically defined look-up-on-parent attribute?
cls_attr = getattr(cls, attr, None)
if attr in cls.attrs_from_parent:
# If the attribute is supposed to be stored on the parent,
# and that is stated by this class yet it was not the case
# on the superclass, override it.
if 'attrs_from_parent' in dct and not isinstance(cls_attr, ParentAttribute):
setattr(cls, attr, ParentAttribute(attr))
elif not cls_attr or isinstance(cls_attr, ParentAttribute):
# If the attribute is not meant to be stored on the parent,
# and if it was not defined already or was previously defined
# as an attribute on the parent, define a regular
# look-up-on-info attribute
setattr(cls, attr,
InfoAttribute(attr, cls._attr_defaults.get(attr)))
class DataInfo(metaclass=DataInfoMeta):
"""
Descriptor that data classes use to add an ``info`` attribute for storing
data attributes in a uniform and portable way. Note that it *must* be
called ``info`` so that the DataInfo() object can be stored in the
``instance`` using the ``info`` key. Because owner_cls.x is a descriptor,
Python doesn't use __dict__['x'] normally, and the descriptor can safely
store stuff there. Thanks to
https://nbviewer.jupyter.org/urls/gist.github.com/ChrisBeaumont/5758381/raw/descriptor_writeup.ipynb
for this trick that works for non-hashable classes.
Parameters
----------
bound : bool
If True this is a descriptor attribute in a class definition, else it
is a DataInfo() object that is bound to a data object instance. Default is False.
"""
_stats = ['mean', 'std', 'min', 'max']
attrs_from_parent = set()
attr_names = {'name', 'unit', 'dtype', 'format', 'description', 'meta'}
_attr_defaults = {'dtype': np.dtype('O')}
_attrs_no_copy = set()
_info_summary_attrs = ('dtype', 'shape', 'unit', 'format', 'description', 'class')
__slots__ = ['_parent_cls', '_parent_ref', '_attrs']
# This specifies the list of object attributes which must be stored in
# order to re-create the object after serialization. This is independent
# of normal `info` attributes like name or description. Subclasses will
# generally either define this statically (QuantityInfo) or dynamically
# (SkyCoordInfo). These attributes may be scalars or arrays. If arrays
# that match the object length they will be serialized as an independent
# column.
_represent_as_dict_attrs = ()
# This specifies attributes which are to be provided to the class
# initializer as ordered args instead of keyword args. This is needed
# for Quantity subclasses where the keyword for data varies (e.g.
# between Quantity and Angle).
_construct_from_dict_args = ()
# This specifies the name of an attribute which is the "primary" data.
# Then when representing as columns
# (table.serialize._represent_mixin_as_column) the output for this
attribute will be written with just the name of the mixin instead of the
# usual "<name>.<attr>".
_represent_as_dict_primary_data = None
def __init__(self, bound=False):
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values. Default of None for "unset"
# except for dtype where the default is object.
if bound:
self._attrs = {}
@property
def _parent(self):
try:
parent = self._parent_ref()
except AttributeError:
return None
if parent is None:
raise AttributeError("""\
failed to access "info" attribute on a temporary object.
It looks like you have done something like ``col[3:5].info`` or
``col.quantity.info``, i.e. you accessed ``info`` from a temporary slice
object that only exists momentarily. This has failed because the reference to
that temporary object is now lost. Instead force a permanent reference (e.g.
``c = col[3:5]`` followed by ``c.info``).""")
return parent
def __get__(self, instance, owner_cls):
if instance is None:
# This is an unbound descriptor on the class
self._parent_cls = owner_cls
return self
info = instance.__dict__.get('info')
if info is None:
info = instance.__dict__['info'] = self.__class__(bound=True)
# We set _parent_ref on every call, since if one makes copies of
# instances, 'info' will be copied as well, which will lose the
# reference.
info._parent_ref = weakref.ref(instance)
return info
def __set__(self, instance, value):
if instance is None:
# This is an unbound descriptor on the class
raise ValueError('cannot set unbound descriptor')
if isinstance(value, DataInfo):
info = instance.__dict__['info'] = self.__class__(bound=True)
attr_names = info.attr_names
if value.__class__ is self.__class__:
# For the same class, attributes are guaranteed to be stored in
# _attrs, so we can speed things up by not accessing defaults.
# Doing this before the set difference in the for loop helps speed.
attr_names = attr_names & set(value._attrs) # NOT in-place!
else:
# For different classes, copy over the attributes in common.
attr_names = attr_names & (value.attr_names - value._attrs_no_copy)
for attr in attr_names - info.attrs_from_parent - info._attrs_no_copy:
info._attrs[attr] = deepcopy(getattr(value, attr))
else:
raise TypeError('info must be set with a DataInfo instance')
def __getstate__(self):
return self._attrs
def __setstate__(self, state):
self._attrs = state
def _represent_as_dict(self, attrs=None):
"""Get the values for the parent ``attrs`` and return as a dict.
By default, uses '_represent_as_dict_attrs'.
"""
if attrs is None:
attrs = self._represent_as_dict_attrs
return _get_obj_attrs_map(self._parent, attrs)
def _construct_from_dict(self, map):
args = [map.pop(attr) for attr in self._construct_from_dict_args]
return self._parent_cls(*args, **map)
info_summary_attributes = staticmethod(
data_info_factory(names=_info_summary_attrs,
funcs=[partial(_get_data_attribute, attr=attr)
for attr in _info_summary_attrs]))
# No nan* methods in numpy < 1.8
info_summary_stats = staticmethod(
data_info_factory(names=_stats,
funcs=[getattr(np, 'nan' + stat)
for stat in _stats]))
def __call__(self, option='attributes', out=''):
"""
Write summary information about data object to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: data object attributes like ``dtype`` and ``format``
- ``stats``: basic statistics: mean, std, min, and max
If a function is specified then that function will be called with the
data object as its single argument. The function must return an
OrderedDict containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table import Column
>>> c = Column([1, 2], unit='m', dtype='int32')
>>> c.info()
dtype = int32
unit = m
class = Column
n_bad = 0
length = 2
>>> c.info(['attributes', 'stats'])
dtype = int32
unit = m
class = Column
mean = 1.5
std = 0.5
min = 1
max = 2
n_bad = 0
length = 2
Parameters
----------
option : str, callable, list of (str or callable)
Info option, defaults to 'attributes'.
out : file-like, None
Output destination, defaults to sys.stdout. If None then the
OrderedDict with information attributes is returned
Returns
-------
info : `~collections.OrderedDict` or None
`~collections.OrderedDict` if out==None else None
"""
if out == '':
out = sys.stdout
dat = self._parent
info = OrderedDict()
name = dat.info.name
if name is not None:
info['name'] = name
options = option if isinstance(option, (list, tuple)) else [option]
for option in options:
if isinstance(option, str):
if hasattr(self, 'info_summary_' + option):
option = getattr(self, 'info_summary_' + option)
else:
raise ValueError('option={} is not an allowed information type'
.format(option))
with warnings.catch_warnings():
for ignore_kwargs in IGNORE_WARNINGS:
warnings.filterwarnings('ignore', **ignore_kwargs)
info.update(option(dat))
if hasattr(dat, 'mask'):
n_bad = np.count_nonzero(dat.mask)
else:
try:
n_bad = np.count_nonzero(np.isinf(dat) | np.isnan(dat))
except Exception:
n_bad = 0
info['n_bad'] = n_bad
try:
info['length'] = len(dat)
except (TypeError, IndexError):
pass
if out is None:
return info
for key, val in info.items():
if val != '':
out.write(f'{key} = {val}' + os.linesep)
def __repr__(self):
if self._parent is None:
return super().__repr__()
out = StringIO()
self.__call__(out=out)
return out.getvalue()
class BaseColumnInfo(DataInfo):
"""
Base info class for anything that can be a column in an astropy
Table. There are at least two classes that inherit from this:
ColumnInfo: for native astropy Column / MaskedColumn objects
MixinInfo: for mixin column objects
Note that this class is defined here so that mixins can use it
without importing the table package.
"""
attr_names = DataInfo.attr_names | {'parent_table', 'indices'}
_attrs_no_copy = {'parent_table', 'indices'}
# Context for serialization. This can be set temporarily via
# ``serialize_context_as(context)`` context manager to allow downstream
# code to understand the context in which a column is being serialized.
# Typical values are 'fits', 'hdf5', 'parquet', 'ecsv', 'yaml'. Objects
# like Time or SkyCoord will have different default serialization
# representations depending on context.
_serialize_context = None
__slots__ = ['_format_funcs', '_copy_indices']
@property
def parent_table(self):
value = self._attrs.get('parent_table')
if callable(value):
value = value()
return value
@parent_table.setter
def parent_table(self, parent_table):
if parent_table is None:
self._attrs.pop('parent_table', None)
else:
parent_table = weakref.ref(parent_table)
self._attrs['parent_table'] = parent_table
def __init__(self, bound=False):
super().__init__(bound=bound)
# If bound to a data object instance then add a _format_funcs dict
# for caching functions for print formatting.
if bound:
self._format_funcs = {}
def __set__(self, instance, value):
# For Table columns do not set `info` when the instance is a scalar.
try:
if not instance.shape:
return
except AttributeError:
pass
super().__set__(instance, value)
def iter_str_vals(self):
"""
This is a mixin-safe version of Column.iter_str_vals.
"""
col = self._parent
if self.parent_table is None:
from astropy.table.column import FORMATTER as formatter
else:
formatter = self.parent_table.formatter
_pformat_col_iter = formatter._pformat_col_iter
yield from _pformat_col_iter(col, -1, False, False, {})
@property
def indices(self):
# Implementation note: the auto-generation as an InfoAttribute cannot
# be used here, since on access, one should not just return the
# default (an empty list in this case), but set _attrs['indices'] so that
# if the list is appended to, it is registered here.
return self._attrs.setdefault('indices', [])
@indices.setter
def indices(self, indices):
self._attrs['indices'] = indices
def adjust_indices(self, index, value, col_len):
'''
Adjust info indices after column modification.
Parameters
----------
index : slice, int, list, or ndarray
Element(s) of column to modify. This parameter can
be a single row number, a list of row numbers, an
ndarray of row numbers, a boolean ndarray (a mask),
or a column slice.
value : int, list, or ndarray
New value(s) to insert
col_len : int
Length of the column
'''
if not self.indices:
return
if isinstance(index, slice):
# run through each key in slice
t = index.indices(col_len)
keys = list(range(*t))
elif isinstance(index, np.ndarray) and index.dtype.kind == 'b':
# boolean mask
keys = np.where(index)[0]
else: # single int
keys = [index]
value = np.atleast_1d(value) # turn array(x) into array([x])
if value.size == 1:
# repeat single value
value = list(value) * len(keys)
for key, val in zip(keys, value):
for col_index in self.indices:
col_index.replace(key, self.name, val)
def slice_indices(self, col_slice, item, col_len):
'''
Given a sliced object, modify its indices
to correctly represent the slice.
Parameters
----------
col_slice : `~astropy.table.Column` or mixin
Sliced object. If not a column, it must be a valid mixin, see
https://docs.astropy.org/en/stable/table/mixin_columns.html
item : slice, list, or ndarray
Slice used to create col_slice
col_len : int
Length of original object
'''
from astropy.table.sorted_array import SortedArray
if not getattr(self, '_copy_indices', True):
# Necessary because MaskedArray will perform a shallow copy
col_slice.info.indices = []
return col_slice
elif isinstance(item, slice):
col_slice.info.indices = [x[item] for x in self.indices]
elif self.indices:
if isinstance(item, np.ndarray) and item.dtype.kind == 'b':
# boolean mask
item = np.where(item)[0]
# Empirical testing suggests that recreating a BST/RBT index is
# more effective than relabelling when less than ~60% of
# the total number of rows are involved, and is in general
# more effective for SortedArray.
small = len(item) <= 0.6 * col_len
col_slice.info.indices = []
for index in self.indices:
if small or isinstance(index, SortedArray):
new_index = index.get_slice(col_slice, item)
else:
new_index = deepcopy(index)
new_index.replace_rows(item)
col_slice.info.indices.append(new_index)
return col_slice
@staticmethod
def merge_cols_attributes(cols, metadata_conflicts, name, attrs):
"""
Utility method to merge and validate the attributes ``attrs`` for the
input table columns ``cols``.
Note that ``dtype`` and ``shape`` attributes are handled specially.
These should not be passed in ``attrs`` but will always be in the
returned dict of merged attributes.
Parameters
----------
cols : list
List of input Table column objects
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
attrs : list
List of attribute names to be merged
Returns
-------
attrs : dict
Of merged attributes.
"""
from astropy.table.np_utils import TableMergeError
def warn_str_func(key, left, right):
out = ("In merged column '{}' the '{}' attribute does not match "
"({} != {}). Using {} for merged output"
.format(name, key, left, right, right))
return out
def getattrs(col):
return {attr: getattr(col.info, attr) for attr in attrs
if getattr(col.info, attr, None) is not None}
out = getattrs(cols[0])
for col in cols[1:]:
out = metadata.merge(out, getattrs(col), metadata_conflicts=metadata_conflicts,
warn_str_func=warn_str_func)
# Output dtype is the superset of all dtypes in the input cols
out['dtype'] = metadata.common_dtype(cols)
# Make sure all input shapes are the same
uniq_shapes = {col.shape[1:] for col in cols}
if len(uniq_shapes) != 1:
raise TableMergeError('columns have different shapes')
out['shape'] = uniq_shapes.pop()
# "Merged" output name is the supplied name
if name is not None:
out['name'] = name
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
The base method raises NotImplementedError and must be overridden.
Returns
-------
arrays : list of ndarray
"""
raise NotImplementedError(f'column {self.name} is not sortable')
class MixinInfo(BaseColumnInfo):
@property
def name(self):
return self._attrs.get('name')
@name.setter
def name(self, name):
# For mixin columns that live within a table, rename the column in the
# table when setting the name attribute. This mirrors the same
# functionality in the BaseColumn class.
if self.parent_table is not None:
new_name = None if name is None else str(name)
self.parent_table.columns._rename_column(self.name, new_name)
self._attrs['name'] = name
@property
def groups(self):
# This implementation for mixin columns essentially matches the Column
# property definition. `groups` is a read-only property here and
# depends on the parent table of the column having `groups`. This will
# allow aggregating mixins as long as they support those operations.
from astropy.table import groups
return self._attrs.setdefault('groups', groups.ColumnGroups(self._parent))
class ParentDtypeInfo(MixinInfo):
"""Mixin that gets info.dtype from parent"""
attrs_from_parent = {'dtype'} # dtype and unit taken from parent
|
e64c19f220ec3a7efb3073f208effb7d20e7dc992979a1c7173f94b09ab103dd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Wrappers for PLY to provide thread safety.
"""
import contextlib
import functools
import re
import os
import threading
__all__ = ['lex', 'ThreadSafeParser', 'yacc']
_TAB_HEADER = """# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest {package}
#
# You can then commit the changes to this file.
"""
_LOCK = threading.RLock()
def _add_tab_header(filename, package):
with open(filename) as f:
contents = f.read()
with open(filename, 'w') as f:
f.write(_TAB_HEADER.format(package=package))
f.write(contents)
@contextlib.contextmanager
def _patch_get_caller_module_dict(module):
"""Temporarily replace the module's get_caller_module_dict.
This is a function inside ``ply.lex`` and ``ply.yacc`` (each has a copy)
that is used to retrieve the caller's local symbols. Here, we patch the
function to instead retrieve the grandparent's local symbols to account
for a wrapper layer.
"""
original = module.get_caller_module_dict
@functools.wraps(original)
def wrapper(levels):
# Add 2, not 1, because the wrapper itself adds another level
return original(levels + 2)
module.get_caller_module_dict = wrapper
yield
module.get_caller_module_dict = original
def lex(lextab, package, reflags=int(re.VERBOSE)):
"""Create a lexer from local variables.
It automatically compiles the lexer in optimized mode, writing to
``lextab`` in the same directory as the calling file.
This function is thread-safe. The returned lexer is *not* thread-safe, but
if it is used exclusively with a single parser returned by :func:`yacc`
then it will be safe.
It is only intended to work with lexers defined within the calling
function, rather than at class or module scope.
Parameters
----------
lextab : str
Name for the file to write with the generated tables, if it does not
already exist (without ``.py`` suffix).
package : str
Name of a test package which should be run with pytest to regenerate
the output file. This is inserted into a comment in the generated
file.
reflags : int
Passed to ``ply.lex``.
"""
from astropy.extern.ply import lex
caller_file = lex.get_caller_module_dict(2)['__file__']
lextab_filename = os.path.join(os.path.dirname(caller_file), lextab + '.py')
with _LOCK:
lextab_exists = os.path.exists(lextab_filename)
with _patch_get_caller_module_dict(lex):
lexer = lex.lex(optimize=True, lextab=lextab,
outputdir=os.path.dirname(caller_file),
reflags=reflags)
if not lextab_exists:
_add_tab_header(lextab_filename, package)
return lexer
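# Illustrative sketch of the intended calling pattern (hypothetical token rules
# and lextab/package names; the t_* rules must be locals of the calling
# function, as required by the docstring above):
#
#     def _make_lexer():
#         tokens = ('NUMBER',)
#         t_ignore = ' '
#
#         def t_NUMBER(t):
#             r'\d+'
#             t.value = int(t.value)
#             return t
#
#         def t_error(t):
#             raise ValueError(f'Invalid character {t.value[0]!r}')
#
#         return lex(lextab='example_lextab', package='mypackage.tests')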
class ThreadSafeParser:
"""Wrap a parser produced by ``ply.yacc.yacc``.
It provides a :meth:`parse` method that is thread-safe.
"""
def __init__(self, parser):
self.parser = parser
self._lock = threading.RLock()
def parse(self, *args, **kwargs):
"""Run the wrapped parser, with a lock to ensure serialization."""
with self._lock:
return self.parser.parse(*args, **kwargs)
def yacc(tabmodule, package):
"""Create a parser from local variables.
It automatically compiles the parser in optimized mode, writing to
``tabmodule`` in the same directory as the calling file.
This function is thread-safe, and the returned parser is also thread-safe,
provided that it does not share a lexer with any other parser.
It is only intended to work with parsers defined within the calling
function, rather than at class or module scope.
Parameters
----------
tabmodule : str
Name for the file to write with the generated tables, if it does not
already exist (without ``.py`` suffix).
package : str
Name of a test package which should be run with pytest to regenerate
the output file. This is inserted into a comment in the generated
file.
"""
from astropy.extern.ply import yacc
caller_file = yacc.get_caller_module_dict(2)['__file__']
tab_filename = os.path.join(os.path.dirname(caller_file), tabmodule + '.py')
with _LOCK:
tab_exists = os.path.exists(tab_filename)
with _patch_get_caller_module_dict(yacc):
parser = yacc.yacc(tabmodule=tabmodule,
outputdir=os.path.dirname(caller_file),
debug=False, optimize=True, write_tables=True)
if not tab_exists:
_add_tab_header(tab_filename, package)
return ThreadSafeParser(parser)
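# Illustrative sketch of the intended calling pattern for ``yacc`` (hypothetical
# grammar and tabmodule/package names; the p_* rules must be locals of the
# calling function and ``tokens`` must match the companion lexer). The returned
# ThreadSafeParser serializes concurrent calls to ``parse`` with its lock:
#
#     def _make_parser():
#         tokens = ('NUMBER',)
#
#         def p_value(p):
#             'value : NUMBER'
#             p[0] = p[1]
#
#         def p_error(p):
#             raise ValueError('Parse error')
#
#         return yacc(tabmodule='example_parsetab', package='mypackage.tests')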
|
13269084e7493cb18bdd1ea11ef627c757989cd1234530b8f6795cf99bdfe4df | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for accessing, downloading, and caching data files."""
import atexit
import contextlib
import errno
import fnmatch
import functools
import hashlib
import os
import io
import re
import shutil
# import ssl moved inside functions using ssl to avoid import failure
# when running in pyodide/Emscripten
import sys
import urllib.request
import urllib.error
import urllib.parse
import zipfile
import ftplib
from tempfile import NamedTemporaryFile, gettempdir, TemporaryDirectory, mkdtemp
from warnings import warn
try:
import certifi
except ImportError:
# certifi support is optional; when available it will be used for TLS/SSL
# downloads
certifi = None
import astropy.config.paths
from astropy import config as _config
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.introspection import find_current_module, resolve_name
# Order here determines order in the autosummary
__all__ = [
'Conf', 'conf',
'download_file', 'download_files_in_parallel',
'get_readable_fileobj',
'get_pkg_data_fileobj', 'get_pkg_data_filename',
'get_pkg_data_contents', 'get_pkg_data_fileobjs',
'get_pkg_data_filenames', 'get_pkg_data_path',
'is_url', 'is_url_in_cache', 'get_cached_urls',
'cache_total_size', 'cache_contents',
'export_download_cache', 'import_download_cache', 'import_file_to_cache',
'check_download_cache',
'clear_download_cache',
'compute_hash',
'get_free_space_in_dir',
'check_free_space_in_dir',
'get_file_contents',
'CacheMissingWarning',
"CacheDamaged"
]
_dataurls_to_alias = {}
class _NonClosingBufferedReader(io.BufferedReader):
def __del__(self):
try:
# NOTE: self.raw will not be closed, but left in the state
# it was in at detachment
self.detach()
except Exception:
pass
class _NonClosingTextIOWrapper(io.TextIOWrapper):
def __del__(self):
try:
# NOTE: self.stream will not be closed, but left in the state
# it was in at detachment
self.detach()
except Exception:
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.data`.
"""
dataurl = _config.ConfigItem(
'http://data.astropy.org/',
'Primary URL for astropy remote data site.')
dataurl_mirror = _config.ConfigItem(
'http://www.astropy.org/astropy-data/',
'Mirror URL for astropy remote data site.')
default_http_user_agent = _config.ConfigItem(
'astropy',
'Default User-Agent for HTTP request headers. This can be overwritten '
'for a particular call via http_headers option, where available. '
'This only provides the default value when not set by https_headers.')
remote_timeout = _config.ConfigItem(
10.,
'Time to wait for remote data queries (in seconds).',
aliases=['astropy.coordinates.name_resolve.name_resolve_timeout'])
allow_internet = _config.ConfigItem(
True,
'If False, prevents any attempt to download from Internet.')
compute_hash_block_size = _config.ConfigItem(
2 ** 16, # 64K
'Block size for computing file hashes.')
download_block_size = _config.ConfigItem(
2 ** 16, # 64K
'Number of bytes of remote data to download per step.')
delete_temporary_downloads_at_exit = _config.ConfigItem(
True,
'If True, temporary download files created when the cache is '
'inaccessible will be deleted at the end of the python session.')
conf = Conf()
class CacheMissingWarning(AstropyWarning):
"""
This warning indicates the standard cache directory is not accessible, with
the first argument providing the warning message. If args[1] is present, it
is a filename indicating the path to a temporary file that was created to
store a remote data download in the absence of the cache.
"""
def is_url(string):
"""
Test whether a string is a valid URL for :func:`download_file`.
Parameters
----------
string : str
The string to test.
Returns
-------
status : bool
String is URL or not.
"""
url = urllib.parse.urlparse(string)
# we can't just check that url.scheme is not an empty string, because
# file paths in windows would return a non-empty scheme (e.g. e:\\
# returns 'e').
return url.scheme.lower() in ['http', 'https', 'ftp', 'sftp', 'ssh', 'file']
# Backward compatibility because some downstream packages allegedly use it.
_is_url = is_url
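# Illustrative sketch of the scheme check above:
#
#     >>> is_url('https://data.astropy.org/index.html')
#     True
#     >>> is_url('local/path/to/file.fits')
#     False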
def _is_inside(path, parent_path):
# We have to try realpath too to avoid issues with symlinks, but we leave
# abspath because some systems like debian have the absolute path (with no
# symlinks followed) match, while the real directories are in different
# locations, so we need to try both cases.
return os.path.abspath(path).startswith(os.path.abspath(parent_path)) \
or os.path.realpath(path).startswith(os.path.realpath(parent_path))
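# Illustrative sketch (assumes POSIX-style paths with no symlinks involved):
#
#     >>> _is_inside('/tmp/astropy-cache/url/contents', '/tmp/astropy-cache')
#     True
#     >>> _is_inside('/tmp/elsewhere/contents', '/tmp/astropy-cache')
#     False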
@contextlib.contextmanager
def get_readable_fileobj(name_or_obj, encoding=None, cache=False,
show_progress=True, remote_timeout=None,
sources=None, http_headers=None):
"""Yield a readable, seekable file-like object from a file or URL.
This supports passing filenames, URLs, and readable file-like objects,
any of which can be compressed in gzip, bzip2 or lzma (xz) if the
appropriate compression libraries are provided by the Python installation.
Notes
-----
This function is a context manager, and should be used for example
as::
with get_readable_fileobj('file.dat') as f:
contents = f.read()
If a URL is provided and the cache is in use, the provided URL will be the
name used in the cache. The contents may already be stored in the cache
under this URL provided, they may be downloaded from this URL, or they may
be downloaded from one of the locations listed in ``sources``. See
`~download_file` for details.
Parameters
----------
name_or_obj : str or file-like
The filename of the file to access (if given as a string), or
the file-like object to access.
If a file-like object, it must be opened in binary mode.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
check the remote URL for a new version but store the result
in the cache.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
Returns
-------
file : readable file-like
"""
# close_fds is a list of file handles created by this function
# that need to be closed. We don't want to always just close the
# returned file handle, because it may simply be the file handle
# passed in. In that case it is not the responsibility of this
# function to close it: doing so could result in a "double close"
# and an "invalid file descriptor" exception.
close_fds = []
delete_fds = []
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
# name_or_obj could be an os.PathLike object
if isinstance(name_or_obj, os.PathLike):
name_or_obj = os.fspath(name_or_obj)
# Get a file object to the content
if isinstance(name_or_obj, str):
is_url = _is_url(name_or_obj)
if is_url:
name_or_obj = download_file(
name_or_obj, cache=cache, show_progress=show_progress,
timeout=remote_timeout, sources=sources,
http_headers=http_headers)
fileobj = io.FileIO(name_or_obj, 'r')
if is_url and not cache:
delete_fds.append(fileobj)
close_fds.append(fileobj)
else:
fileobj = name_or_obj
# Check if the file object supports random access, and if not,
# then wrap it in a BytesIO buffer. It would be nicer to use a
# BufferedReader to avoid reading loading the whole file first,
# but that is not compatible with streams or urllib2.urlopen
# objects on Python 2.x.
if not hasattr(fileobj, 'seek'):
try:
# py.path.LocalPath objects have .read() method but it uses
# text mode, which won't work. .read_binary() does, and
# surely other ducks would return binary contents when
# called like this.
# py.path.LocalPath is what comes from the tmpdir fixture
# in pytest.
fileobj = io.BytesIO(fileobj.read_binary())
except AttributeError:
fileobj = io.BytesIO(fileobj.read())
# Now read enough bytes to look at signature
signature = fileobj.read(4)
fileobj.seek(0)
if signature[:3] == b'\x1f\x8b\x08': # gzip
import struct
try:
import gzip
fileobj_new = gzip.GzipFile(fileobj=fileobj, mode='rb')
fileobj_new.read(1) # need to check that the file is really gzip
except (OSError, EOFError, struct.error): # invalid gzip file
fileobj.seek(0)
fileobj_new.close()
else:
fileobj_new.seek(0)
fileobj = fileobj_new
elif signature[:3] == b'BZh': # bzip2
try:
import bz2
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the bz2 module.")
try:
# bz2.BZ2File does not support file objects, only filenames, so we
# need to write the data to a temporary file
with NamedTemporaryFile("wb", delete=False) as tmp:
tmp.write(fileobj.read())
tmp.close()
fileobj_new = bz2.BZ2File(tmp.name, mode='rb')
fileobj_new.read(1) # need to check that the file is really bzip2
except OSError: # invalid bzip2 file
fileobj.seek(0)
fileobj_new.close()
# raise
else:
fileobj_new.seek(0)
close_fds.append(fileobj_new)
fileobj = fileobj_new
elif signature[:3] == b'\xfd7z': # xz
try:
import lzma
fileobj_new = lzma.LZMAFile(fileobj, mode='rb')
fileobj_new.read(1) # need to check that the file is really xz
except ImportError:
for fd in close_fds:
fd.close()
raise ModuleNotFoundError(
"This Python installation does not provide the lzma module.")
except (OSError, EOFError): # invalid xz file
fileobj.seek(0)
fileobj_new.close()
# should we propagate this to the caller to signal bad content?
# raise ValueError(e)
else:
fileobj_new.seek(0)
fileobj = fileobj_new
# By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File
# or lzma.LZMAFile instance opened in binary mode (that is, read
# returns bytes). Now we need to, if requested, wrap it in a
# io.TextIOWrapper so read will return unicode based on the
# encoding parameter.
needs_textio_wrapper = encoding != 'binary'
if needs_textio_wrapper:
# A bz2.BZ2File can not be wrapped by a TextIOWrapper,
# so we decompress it to a temporary file and then
# return a handle to that.
try:
import bz2
except ImportError:
pass
else:
if isinstance(fileobj, bz2.BZ2File):
tmp = NamedTemporaryFile("wb", delete=False)
data = fileobj.read()
tmp.write(data)
tmp.close()
delete_fds.append(tmp)
fileobj = io.FileIO(tmp.name, 'r')
close_fds.append(fileobj)
fileobj = _NonClosingBufferedReader(fileobj)
fileobj = _NonClosingTextIOWrapper(fileobj, encoding=encoding)
# Ensure that file is at the start - io.FileIO will for
# example not always be at the start:
# >>> import io
# >>> f = open('test.fits', 'rb')
# >>> f.read(4)
# 'SIMP'
# >>> f.seek(0)
# >>> fileobj = io.FileIO(f.fileno())
# >>> fileobj.tell()
# 4096L
fileobj.seek(0)
try:
yield fileobj
finally:
for fd in close_fds:
fd.close()
for fd in delete_fds:
os.remove(fd.name)
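# Illustrative usage sketch of the context manager above (hypothetical local
# filename; compression is detected from the file signature, so the same call
# works for plain, gzip, bzip2 and xz files, and for remote URLs when
# ``cache``/``sources`` are given):
#
#     >>> with get_readable_fileobj('catalog.txt.gz', encoding='binary') as f:  # doctest: +SKIP
#     ...     first_line = f.readline()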
def get_file_contents(*args, **kwargs):
"""
Retrieves the contents of a filename or file-like object.
See the `get_readable_fileobj` docstring for details on parameters.
Returns
-------
object
The content of the file (as requested by ``encoding``).
"""
with get_readable_fileobj(*args, **kwargs) as f:
return f.read()
@contextlib.contextmanager
def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations for the package and
provides the file as a file-like object that reads bytes.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
fileobj : file-like
An object with the contents of the data file available via
``read`` function. Can be used as part of a ``with`` statement,
automatically closing itself after the ``with`` block.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Examples
--------
This will retrieve a data file and its contents for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('data/3d_cd.hdr',
... package='astropy.wcs.tests') as fobj:
... fcontents = fobj.read()
...
This next example would download a data file from the astropy data server
because the ``allsky/allsky_rosat.fits`` file is not present in the
source distribution. It will also save the file locally so the
next time it is accessed it won't need to be downloaded.::
>>> from astropy.utils.data import get_pkg_data_fileobj
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
This does the same thing but does *not* cache it locally::
>>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits',
... encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT
... fcontents = fobj.read()
...
Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done]
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_filename : returns a local name for a file containing the data
""" # noqa
datafn = get_pkg_data_path(data_name, package=package)
if os.path.isdir(datafn):
raise OSError("Tried to access a data file that's actually "
"a package data directory")
elif os.path.isfile(datafn): # local file
with get_readable_fileobj(datafn, encoding=encoding) as fileobj:
yield fileobj
else: # remote file
with get_readable_fileobj(
conf.dataurl + data_name,
encoding=encoding,
cache=cache,
sources=[conf.dataurl + data_name,
conf.dataurl_mirror + data_name],
) as fileobj:
# We read a byte to trigger any URLErrors
fileobj.read(1)
fileobj.seek(0)
yield fileobj
def get_pkg_data_filename(data_name, package=None, show_progress=True,
remote_timeout=None):
"""
Retrieves a data file from the standard locations for the package and
provides a local filename for the data.
This function is similar to `get_pkg_data_fileobj` but returns the
file *name* instead of a readable file-like object. This means
that this function must always cache remote files locally, unlike
`get_pkg_data_fileobj`.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
show_progress : bool, optional
Whether to display a progress bar if the file is downloaded
from a remote server. Default is `True`.
remote_timeout : float
Timeout for the requests in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
Returns
-------
filename : str
A file path on the local file system corresponding to the data
requested in ``data_name``.
Examples
--------
This will retrieve the contents of the data file for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('data/3d_cd.hdr',
... package='astropy.wcs.tests')
>>> with open(fn) as f:
... fcontents = f.read()
...
This retrieves a data file by hash either locally or from the astropy data
server::
>>> from astropy.utils.data import get_pkg_data_filename
>>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP
>>> with open(fn) as f:
... fcontents = f.read()
...
See Also
--------
get_pkg_data_contents : returns the contents of a file or url as a bytes object
get_pkg_data_fileobj : returns a file-like object with the data
"""
if remote_timeout is None:
# use configfile default
remote_timeout = conf.remote_timeout
if data_name.startswith('hash/'):
# first try looking for a local version if a hash is specified
hashfn = _find_hash_fn(data_name[5:])
if hashfn is None:
return download_file(conf.dataurl + data_name, cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name,
conf.dataurl_mirror + data_name])
else:
return hashfn
else:
fs_path = os.path.normpath(data_name)
datafn = get_pkg_data_path(fs_path, package=package)
if os.path.isdir(datafn):
raise OSError("Tried to access a data file that's actually "
"a package data directory")
elif os.path.isfile(datafn): # local file
return datafn
else: # remote file
return download_file(conf.dataurl + data_name, cache=True,
show_progress=show_progress,
timeout=remote_timeout,
sources=[conf.dataurl + data_name,
conf.dataurl_mirror + data_name])
def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True):
"""
Retrieves a data file from the standard locations and returns its
contents as a bytes object.
Parameters
----------
data_name : str
Name/location of the desired data file. One of the following:
* The name of a data file included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data/file.dat'`` to get the
file in ``astropy/pkgname/data/file.dat``. Double-dots
can be used to go up a level. In the same example, use
``'../data/file.dat'`` to get ``astropy/data/file.dat``.
* If a matching local file does not exist, the Astropy
data server will be queried for the file.
* A hash like that produced by `compute_hash` can be
requested, prefixed by 'hash/'
e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash
will first be searched for locally, and if not found,
the Astropy data server will be queried.
* A URL to some other file.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
cache : bool
If True, the file will be downloaded and saved locally or the
already-cached local copy will be accessed. If False, the
file-like object will directly access the resource (e.g. if a
remote URL is accessed, an object like that from
`urllib.request.urlopen` is returned).
Returns
-------
contents : bytes
The complete contents of the file as a bytes object.
Raises
------
urllib.error.URLError
If a remote file cannot be found.
OSError
If problems occur writing or reading a local file.
See Also
--------
get_pkg_data_fileobj : returns a file-like object with the data
get_pkg_data_filename : returns a local name for a file containing the data
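Examples
--------
For example, the same `astropy.wcs` test header used in the examples for
`get_pkg_data_filename` can be read in a single call::
>>> from astropy.utils.data import get_pkg_data_contents
>>> contents = get_pkg_data_contents('data/3d_cd.hdr',
...                                  package='astropy.wcs.tests')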
"""
with get_pkg_data_fileobj(data_name, package=package, encoding=encoding,
cache=cache) as fd:
contents = fd.read()
return contents
def get_pkg_data_filenames(datadir, package=None, pattern='*'):
"""
Returns the paths of all of the data files in a given directory
that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``.
* Remote URLs are not currently supported.
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
Returns
-------
filenames : iterator of str
Paths on the local filesystem in *datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data files for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_filenames
>>> for fn in get_pkg_data_filenames('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... with open(fn) as f:
... fcontents = f.read()
...
"""
path = get_pkg_data_path(datadir, package=package)
if os.path.isfile(path):
raise OSError(
"Tried to access a data directory that's actually "
"a package data file")
elif os.path.isdir(path):
for filename in os.listdir(path):
if fnmatch.fnmatch(filename, pattern):
yield os.path.join(path, filename)
else:
raise OSError("Path not found")
def get_pkg_data_fileobjs(datadir, package=None, pattern='*', encoding=None):
"""
Returns readable file objects for all of the data files in a given
directory that match a given glob pattern.
Parameters
----------
datadir : str
Name/location of the desired data files. One of the following:
* The name of a directory included in the source
distribution. The path is relative to the module
calling this function. For example, if calling from
``astropy.pkgname``, use ``'data'`` to get the
files in ``astropy/pkgname/data``
* Remote URLs are not currently supported
package : str, optional
If specified, look for a file relative to the given package, rather
than the default of looking relative to the calling module's package.
pattern : str, optional
A UNIX-style filename glob pattern to match files. See the
`glob` module in the standard library for more information.
By default, matches all files.
encoding : str, optional
When `None` (default), returns a file-like object with a
``read`` method that returns `str` (``unicode``) objects, using
`locale.getpreferredencoding` as an encoding. This matches
the default behavior of the built-in `open` when no ``mode``
argument is provided.
When ``'binary'``, returns a file-like object where its ``read``
method returns `bytes` objects.
When another string, it is the name of an encoding, and the
file-like object's ``read`` method will return `str` (``unicode``)
objects, decoded from binary using the given encoding.
Returns
-------
fileobjs : iterator of file object
File objects for each of the files on the local filesystem in
*datadir* matching *pattern*.
Examples
--------
This will retrieve the contents of the data files for the `astropy.wcs`
tests::
>>> from astropy.utils.data import get_pkg_data_fileobjs
>>> for fd in get_pkg_data_fileobjs('data/maps', 'astropy.wcs.tests',
... '*.hdr'):
... fcontents = fd.read()
...
"""
for fn in get_pkg_data_filenames(datadir, package=package,
pattern=pattern):
with get_readable_fileobj(fn, encoding=encoding) as fd:
yield fd
def compute_hash(localfn):
""" Computes the MD5 hash for a file.
The hash for a data file is used for looking up data files in a unique
fashion. This is of particular use for tests; a test may require a
particular version of a particular file, in which case it can be accessed
via hash to get the appropriate version.
Typically, if you wish to write a test that requires a particular data
file, you will want to submit that file to the astropy data servers, and
use
e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``,
but with the hash for your file in place of the hash in the example.
Parameters
----------
localfn : str
The path to the file for which the hash should be generated.
Returns
-------
hash : str
The hex digest of the cryptographic hash for the contents of the
``localfn`` file.
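Examples
--------
A minimal sketch; the file path and the digest shown below are purely
illustrative::
>>> from astropy.utils.data import compute_hash
>>> compute_hash('/local/path/to/file.dat')  # doctest: +SKIP
'34c33b3eb0d56eb9462003af249eff28'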
"""
with open(localfn, 'rb') as f:
h = hashlib.md5()
block = f.read(conf.compute_hash_block_size)
while block:
h.update(block)
block = f.read(conf.compute_hash_block_size)
return h.hexdigest()
def get_pkg_data_path(*path, package=None):
"""Get path from source-included data directories.
Parameters
----------
*path : str
Name/location of the desired data file/directory.
May be a tuple of strings for ``os.path`` joining.
package : str or None, optional, keyword-only
If specified, look for a file relative to the given package, rather
than the calling module's package.
Returns
-------
path : str
Name/location of the desired data file/directory.
Raises
------
ImportError
Given package or module is not importable.
RuntimeError
If the local data file is outside of the package's tree.
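Examples
--------
For example, to get the directory holding the `astropy.wcs` test maps used
elsewhere in this module's examples::
>>> from astropy.utils.data import get_pkg_data_path
>>> maps_dir = get_pkg_data_path('data', 'maps',
...                              package='astropy.wcs.tests')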
"""
if package is None:
module = find_current_module(1, finddiff=['astropy.utils.data', 'contextlib'])
if module is None:
# not called from inside an astropy package. So just pass name
# through
return os.path.join(*path)
if not hasattr(module, '__package__') or not module.__package__:
# The __package__ attribute may be missing or set to None; see
# PEP-366, also astropy issue #1256
if '.' in module.__name__:
package = module.__name__.rpartition('.')[0]
else:
package = module.__name__
else:
package = module.__package__
else:
# resolve_name raises if `package` is not a str,
# so no further type checks are needed in this if/else
module = resolve_name(package)
# module path within package
module_path = os.path.dirname(module.__file__)
full_path = os.path.join(module_path, *path)
# Check that file is inside tree.
rootpkgname = package.partition('.')[0]
rootpkg = resolve_name(rootpkgname)
root_dir = os.path.dirname(rootpkg.__file__)
if not _is_inside(full_path, root_dir):
raise RuntimeError(f"attempted to get a local data file outside "
f"of the {rootpkgname} tree.")
return full_path
def _find_hash_fn(hexdigest, pkgname='astropy'):
"""
Looks for a local file by hash - returns file name if found and a valid
file, otherwise returns None.
"""
for v in cache_contents(pkgname=pkgname).values():
if compute_hash(v) == hexdigest:
return v
return None
def get_free_space_in_dir(path, unit=False):
"""
Given a path to a directory, returns the amount of free space
on that filesystem.
Parameters
----------
path : str
The path to a directory.
unit : bool or `~astropy.units.Unit`
Return the amount of free space as Quantity in the given unit,
if provided. Default is `False` for backward-compatibility.
Returns
-------
free_space : int or `~astropy.units.Quantity`
The amount of free space on the partition that the directory is on.
If ``unit=False``, it is returned as plain integer (in bytes).
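Examples
--------
A minimal sketch; the current directory is used purely for illustration::
>>> from astropy.utils.data import get_free_space_in_dir
>>> free_bytes = get_free_space_in_dir('.')  # plain int, in bytes
>>> free_quantity = get_free_space_in_dir('.', unit=True)  # Quantity in bytes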
"""
if not os.path.isdir(path):
raise OSError(
"Can only determine free space associated with directories, "
"not files.")
# Actually you can on Linux but I want to avoid code that fails
# on Windows only.
free_space = shutil.disk_usage(path).free
if unit:
from astropy import units as u
# TODO: Automatically determine best prefix to use.
if unit is True:
unit = u.byte
free_space = u.Quantity(free_space, u.byte).to(unit)
return free_space
def check_free_space_in_dir(path, size):
"""
Determines if a given directory has enough space to hold a file of
a given size.
Parameters
----------
path : str
The path to a directory.
size : int or `~astropy.units.Quantity`
A proposed filesize. If not a Quantity, assume it is in bytes.
Raises
------
OSError
There is not enough room on the filesystem.
"""
space = get_free_space_in_dir(path, unit=getattr(size, 'unit', False))
if space < size:
from astropy.utils.console import human_file_size
raise OSError(f"Not enough free space in {path} "
f"to download a {human_file_size(size)} file, "
f"only {human_file_size(space)} left")
class _ftptlswrapper(urllib.request.ftpwrapper):
def init(self):
self.busy = 0
self.ftp = ftplib.FTP_TLS()
self.ftp.connect(self.host, self.port, self.timeout)
self.ftp.login(self.user, self.passwd)
self.ftp.prot_p()
_target = '/'.join(self.dirs)
self.ftp.cwd(_target)
class _FTPTLSHandler(urllib.request.FTPHandler):
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
return _ftptlswrapper(user, passwd, host, port, dirs, timeout,
persistent=False)
@functools.lru_cache
def _build_urlopener(ftp_tls=False, ssl_context=None, allow_insecure=False):
"""
Helper for building a `urllib.request.build_opener` which handles TLS/SSL.
"""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
ssl_context = dict(it for it in ssl_context) if ssl_context else {}
cert_chain = {}
if 'certfile' in ssl_context:
cert_chain.update({
'certfile': ssl_context.pop('certfile'),
'keyfile': ssl_context.pop('keyfile', None),
'password': ssl_context.pop('password', None)
})
elif 'password' in ssl_context or 'keyfile' in ssl_context:
raise ValueError(
"passing 'keyfile' or 'password' in the ssl_context argument "
"requires passing 'certfile' as well")
if 'cafile' not in ssl_context and certifi is not None:
ssl_context['cafile'] = certifi.where()
ssl_context = ssl.create_default_context(**ssl_context)
if allow_insecure:
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
if cert_chain:
ssl_context.load_cert_chain(**cert_chain)
https_handler = urllib.request.HTTPSHandler(context=ssl_context)
if ftp_tls:
urlopener = urllib.request.build_opener(_FTPTLSHandler(), https_handler)
else:
urlopener = urllib.request.build_opener(https_handler)
return urlopener
def _try_url_open(source_url, timeout=None, http_headers=None, ftp_tls=False,
ssl_context=None, allow_insecure=False):
"""Helper for opening a URL while handling TLS/SSL verification issues."""
# Import ssl here to avoid import failure when running in pyodide/Emscripten
import ssl
# Always try first with a secure connection
# _build_urlopener uses lru_cache, so the ssl_context argument must be
# converted to a hashable type (a frozenset of 2-tuples)
ssl_context = frozenset(ssl_context.items() if ssl_context else [])
urlopener = _build_urlopener(ftp_tls=ftp_tls, ssl_context=ssl_context,
allow_insecure=False)
req = urllib.request.Request(source_url, headers=http_headers)
try:
return urlopener.open(req, timeout=timeout)
except urllib.error.URLError as exc:
reason = exc.reason
if (isinstance(reason, ssl.SSLError)
and reason.reason == 'CERTIFICATE_VERIFY_FAILED'):
msg = (f'Verification of TLS/SSL certificate at {source_url} '
f'failed: this can mean either the server is '
f'misconfigured or your local root CA certificates are '
f'out-of-date; in the latter case this can usually be '
f'addressed by installing the Python package "certifi" '
f'(see the documentation for astropy.utils.data.download_url)')
if not allow_insecure:
msg += (f' or in both cases you can work around this by '
f'passing allow_insecure=True, but only if you '
f'understand the implications; the original error '
f'was: {reason}')
raise urllib.error.URLError(msg)
else:
msg += '. Re-trying with allow_insecure=True.'
warn(msg, AstropyWarning)
# Try again with a new urlopener allowing insecure connections
urlopener = _build_urlopener(ftp_tls=ftp_tls, ssl_context=ssl_context,
allow_insecure=True)
return urlopener.open(req, timeout=timeout)
raise
def _download_file_from_source(source_url, show_progress=True, timeout=None,
remote_url=None, cache=False, pkgname='astropy',
http_headers=None, ftp_tls=None,
ssl_context=None, allow_insecure=False):
from astropy.utils.console import ProgressBarOrSpinner
if not conf.allow_internet:
raise urllib.error.URLError(
f"URL {remote_url} was supposed to be downloaded but "
f"allow_internet is {conf.allow_internet}; "
f"if this is unexpected check the astropy.cfg file for the option "
f"allow_internet")
if remote_url is None:
remote_url = source_url
if http_headers is None:
http_headers = {}
if ftp_tls is None and urllib.parse.urlparse(remote_url).scheme == "ftp":
try:
return _download_file_from_source(source_url,
show_progress=show_progress,
timeout=timeout,
remote_url=remote_url,
cache=cache,
pkgname=pkgname,
http_headers=http_headers,
ftp_tls=False)
except urllib.error.URLError as e:
# e.reason might not be a string, e.g. socket.gaierror
if str(e.reason).startswith("ftp error: error_perm"):
ftp_tls = True
else:
raise
with _try_url_open(source_url, timeout=timeout, http_headers=http_headers,
ftp_tls=ftp_tls, ssl_context=ssl_context,
allow_insecure=allow_insecure) as remote:
info = remote.info()
try:
size = int(info['Content-Length'])
except (KeyError, ValueError, TypeError):
size = None
if size is not None:
check_free_space_in_dir(gettempdir(), size)
if cache:
dldir = _get_download_cache_loc(pkgname)
check_free_space_in_dir(dldir, size)
# If a user has overridden sys.stdout it might not have the
# isatty method, in that case assume it's not a tty
is_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if show_progress and is_tty:
progress_stream = sys.stdout
else:
progress_stream = io.StringIO()
if source_url == remote_url:
dlmsg = f"Downloading {remote_url}"
else:
dlmsg = f"Downloading {remote_url} from {source_url}"
with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p:
with NamedTemporaryFile(prefix=f"astropy-download-{os.getpid()}-",
delete=False) as f:
try:
bytes_read = 0
block = remote.read(conf.download_block_size)
while block:
f.write(block)
bytes_read += len(block)
p.update(bytes_read)
block = remote.read(conf.download_block_size)
if size is not None and bytes_read > size:
raise urllib.error.URLError(
f"File was supposed to be {size} bytes but "
f"server provides more, at least {bytes_read} "
f"bytes. Download failed.")
if size is not None and bytes_read < size:
raise urllib.error.ContentTooShortError(
f"File was supposed to be {size} bytes but we "
f"only got {bytes_read} bytes. Download failed.",
content=None)
except BaseException:
if os.path.exists(f.name):
try:
os.remove(f.name)
except OSError:
pass
raise
return f.name
def download_file(remote_url, cache=False, show_progress=True, timeout=None,
sources=None, pkgname='astropy', http_headers=None,
ssl_context=None, allow_insecure=False):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
Timeout for remote requests in seconds (default is the configurable
`astropy.utils.data.Conf.remote_timeout`).
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``remote_url``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ssl_context : dict, optional
Keyword arguments to pass to `ssl.create_default_context` when
downloading from HTTPS or TLS+FTP sources. This can be used to provide
alternative paths to root CA certificates. Additionally, if the key
``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are
included, they are passed to `ssl.SSLContext.load_cert_chain`. This
can be used for performing SSL/TLS client certificate authentication
for servers that require it.
allow_insecure : bool, optional
Allow downloading files over a TLS/SSL connection even when the server
certificate verification failed. When set to `True` the potentially
insecure download is allowed to proceed, but an
`~astropy.utils.exceptions.AstropyWarning` is issued. If you are
frequently getting certificate verification warnings, consider
installing or upgrading the `certifi`_ package, which provides frequently
updated certificates for common root CAs (i.e., a set similar to those
used by web browsers). If installed, Astropy will use it
automatically.
.. _certifi: https://pypi.org/project/certifi/
Returns
-------
local_path : str
The local path that the file was downloaded to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because this function returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
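Examples
--------
A minimal sketch of fetching and caching a file; the URL below is purely
illustrative::
>>> from astropy.utils.data import download_file
>>> local_path = download_file('https://data.example.org/catalog.fits',
...                            cache=True)  # doctest: +SKIP
>>> with open(local_path, 'rb') as f:  # doctest: +SKIP
...     first_bytes = f.read(80)
...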
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {'User-Agent': conf.default_http_user_agent,
'Accept': '*/*'}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
cache = False
missing_cache = (
f"Cache directory cannot be read or created ({e}), "
f"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(f"Cache value '{cache}' was requested but "
f"'update' is the only recognized string; "
f"otherwise use a boolean")
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ssl_context=ssl_context,
allow_insecure=allow_insecure)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (hasattr(e, 'reason')
and hasattr(e.reason, 'errno')
and e.reason.errno == 8):
e.reason.strerror = (e.reason.strerror +
'. requested URL: '
+ remote_url)
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
f"Please include primary URL in sources if you want it to be "
f"included as a valid source.")
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}") \
from errors[sources[0]]
if cache:
try:
return import_file_to_cache(url_key, f_name,
remove_original=True,
replace=(cache == 'update'),
pkgname=pkgname)
except PermissionError as e:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only ({e}), unable to import "
f"downloaded file, providing data in temporary file {f_name} "
f"instead.")
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
def is_url_in_cache(url_key, pkgname='astropy'):
"""Check if a download for ``url_key`` is in the cache.
The provided ``url_key`` will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
Parameters
----------
url_key : str
The URL retrieved
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
in_cache : bool
`True` if a download for ``url_key`` is in the cache, `False` if not
or if the cache does not exist at all.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
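Examples
--------
A sketch; the URL is illustrative and the results depend on what has been
downloaded previously::
>>> from astropy.utils.data import download_file, is_url_in_cache
>>> url = 'https://data.example.org/catalog.fits'
>>> is_url_in_cache(url)  # doctest: +SKIP
False
>>> local_path = download_file(url, cache=True)  # doctest: +SKIP
>>> is_url_in_cache(url)  # doctest: +SKIP
True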
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError:
return False
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
return os.path.exists(filename)
def cache_total_size(pkgname='astropy'):
"""Return the total size in bytes of all files in the cache."""
size = 0
dldir = _get_download_cache_loc(pkgname=pkgname)
for root, dirs, files in os.walk(dldir):
size += sum(os.path.getsize(os.path.join(root, name)) for name in files)
return size
def _do_download_files_in_parallel(kwargs):
with astropy.config.paths.set_temp_config(kwargs.pop("temp_config")):
with astropy.config.paths.set_temp_cache(kwargs.pop("temp_cache")):
return download_file(**kwargs)
def download_files_in_parallel(urls,
cache="update",
show_progress=True,
timeout=None,
sources=None,
multiprocessing_start_method=None,
pkgname='astropy'):
"""Download multiple files in parallel from the given URLs.
Blocks until all files have downloaded. The result is a list of
local file paths corresponding to the given urls.
The results will be stored in the cache under the values in ``urls`` even
if they are obtained from some other location via ``sources``. See
`~download_file` for details.
Parameters
----------
urls : list of str
The URLs to retrieve.
cache : bool or "update", optional
Whether to use the cache (default is `True`). If "update",
always download the remote URLs to see if new data is available
and store the result in cache.
.. versionchanged:: 4.0
The default was changed to ``"update"`` and setting it to
``False`` will print a Warning and set it to ``"update"`` again,
because the function will not work properly without cache. Using
``True`` will work as expected.
.. versionchanged:: 3.0
The default was changed to ``True`` and setting it to ``False``
will print a Warning and set it to ``True`` again, because the
function will not work properly without cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`)
timeout : float, optional
Timeout for each individual request in seconds (default is the
configurable `astropy.utils.data.Conf.remote_timeout`).
sources : dict, optional
If provided, for each URL a list of URLs to try to obtain the
file from. The result will be stored under the original URL.
For any URL in this dictionary, the original URL will *not* be
tried unless it is in this list; this is to prevent long waits
for a primary server that is known to be inaccessible at the
moment.
multiprocessing_start_method : str, optional
Useful primarily for testing; if in doubt leave it as the default.
When using multiprocessing, certain anomalies occur when starting
processes with the "spawn" method (the only option on Windows);
other anomalies occur with the "fork" method (the default on
Linux).
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
paths : list of str
The local file paths corresponding to the downloaded URLs.
Notes
-----
If a URL is unreachable, the downloading will grind to a halt and the
exception will propagate upward, but an unpredictable number of
files will have been successfully downloaded and will remain in
the cache.
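Examples
--------
A minimal sketch; the URLs below are purely illustrative::
>>> from astropy.utils.data import download_files_in_parallel
>>> urls = ['https://data.example.org/a.fits',
...         'https://data.example.org/b.fits']
>>> paths = download_files_in_parallel(urls)  # doctest: +SKIP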
"""
from .console import ProgressBar
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = {}
if not cache:
# See issue #6662, on windows won't work because the files are removed
# again before they can be used. On *NIX systems it will behave as if
# cache was set to True because multiprocessing cannot insert the items
# in the list of to-be-removed files. This could be fixed, but really,
# just use the cache, with update_cache if appropriate.
warn('Disabling the cache does not work because of multiprocessing, '
'it will be set to ``"update"``. You may need to manually remove '
'the cached files with clear_download_cache() afterwards.',
AstropyWarning)
cache = "update"
if show_progress:
progress = sys.stdout
else:
progress = io.BytesIO()
# Combine duplicate URLs
combined_urls = list(set(urls))
combined_paths = ProgressBar.map(
_do_download_files_in_parallel,
[dict(remote_url=u,
cache=cache,
show_progress=False,
timeout=timeout,
sources=sources.get(u, None),
pkgname=pkgname,
temp_cache=astropy.config.paths.set_temp_cache._temp_path,
temp_config=astropy.config.paths.set_temp_config._temp_path)
for u in combined_urls],
file=progress,
multiprocess=True,
multiprocessing_start_method=multiprocessing_start_method,
)
paths = []
for url in urls:
paths.append(combined_paths[combined_urls.index(url)])
return paths
# This is used by download_file and _deltemps to determine the files to delete
# when the interpreter exits
_tempfilestodel = []
@atexit.register
def _deltemps():
global _tempfilestodel
if _tempfilestodel is not None:
while len(_tempfilestodel) > 0:
fn = _tempfilestodel.pop()
if os.path.isfile(fn):
try:
os.remove(fn)
except OSError:
# oh well we tried
# could be held open by some process, on Windows
pass
elif os.path.isdir(fn):
try:
shutil.rmtree(fn)
except OSError:
# couldn't get rid of it, sorry
# could be held open by some process, on Windows
pass
def clear_download_cache(hashorurl=None, pkgname='astropy'):
"""Clears the data file cache by deleting the local file(s).
If a URL is provided, it will be the name used in the cache. The contents
may have been downloaded from this URL or from a mirror or they may have
been provided by the user. See `~download_file` for details.
For the purposes of this function, a file can also be identified by a hash
of its contents or by the filename under which the data is stored (as
returned by `~download_file`, for example).
Parameters
----------
hashorurl : str or None
If None, the whole cache is cleared. Otherwise, specify
a hash for the cached file that is supposed to be deleted,
the full path to a file in the cache that should be deleted,
or a URL that should be removed from the cache if present.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
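Examples
--------
For example, to drop a single cached URL or to empty the cache entirely
(the URL is illustrative)::
>>> from astropy.utils.data import clear_download_cache
>>> clear_download_cache('https://data.example.org/catalog.fits')  # doctest: +SKIP
>>> clear_download_cache()  # doctest: +SKIP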
"""
try:
dldir = _get_download_cache_loc(pkgname)
except OSError as e:
# Problem arose when trying to open the cache
# Just a warning, though
msg = 'Not clearing data cache - cache inaccessible due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
return
try:
if hashorurl is None:
# Optional: delete old incompatible caches too
_rmtree(dldir)
elif _is_url(hashorurl):
filepath = os.path.join(dldir, _url_to_dirname(hashorurl))
_rmtree(filepath)
else:
# Not a URL, it should be either a filename or a hash
filepath = os.path.join(dldir, hashorurl)
rp = os.path.relpath(filepath, dldir)
if rp.startswith(".."):
raise RuntimeError(
f"attempted to use clear_download_cache on the path "
f"{filepath} outside the data cache directory {dldir}")
d, f = os.path.split(rp)
if d and f in ["contents", "url"]:
# It's a filename not the hash of a URL
# so we want to zap the directory containing the
# files "url" and "contents"
filepath = os.path.join(dldir, d)
if os.path.exists(filepath):
_rmtree(filepath)
elif (len(hashorurl) == 2*hashlib.md5().digest_size
and re.match(r"[0-9a-f]+", hashorurl)):
# It's the hash of some file contents, we have to find the right file
filename = _find_hash_fn(hashorurl)
if filename is not None:
clear_download_cache(filename)
except OSError as e:
msg = 'Not clearing data from cache - problem arose '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
def _get_download_cache_loc(pkgname='astropy'):
"""Finds the path to the cache directory and makes them if they don't exist.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
datadir : str
The path to the data cache directory.
"""
try:
datadir = os.path.join(astropy.config.paths.get_cache_dir(pkgname), 'download', 'url')
if not os.path.exists(datadir):
try:
os.makedirs(datadir)
except OSError:
if not os.path.exists(datadir):
raise
elif not os.path.isdir(datadir):
raise OSError(f'Data cache directory {datadir} is not a directory')
return datadir
except OSError as e:
msg = 'Remote data cache could not be accessed due to '
estr = '' if len(e.args) < 1 else (': ' + str(e))
warn(CacheMissingWarning(msg + e.__class__.__name__ + estr))
raise
def _url_to_dirname(url):
if not _is_url(url):
raise ValueError(f"Malformed URL: '{url}'")
# Make domain names case-insensitive
# Also makes the http:// case-insensitive
urlobj = list(urllib.parse.urlsplit(url))
urlobj[1] = urlobj[1].lower()
if urlobj[0].lower() in ['http', 'https'] and urlobj[1] and urlobj[2] == '':
urlobj[2] = '/'
url_c = urllib.parse.urlunsplit(urlobj)
return hashlib.md5(url_c.encode("utf-8")).hexdigest()
class ReadOnlyDict(dict):
def __setitem__(self, key, value):
raise TypeError("This object is read-only.")
_NOTHING = ReadOnlyDict({})
class CacheDamaged(ValueError):
"""Record the URL or file that was a problem.
Using clear_download_cache on the .bad_file or .bad_url attribute,
whichever is not None, should resolve this particular problem.
"""
def __init__(self, *args, bad_urls=None, bad_files=None, **kwargs):
super().__init__(*args, **kwargs)
self.bad_urls = bad_urls if bad_urls is not None else []
self.bad_files = bad_files if bad_files is not None else []
def check_download_cache(pkgname='astropy'):
"""Do a consistency check on the cache.
.. note::
Since v5.0, this function no longer returns anything.
Because the cache is shared by all versions of ``astropy`` in all virtualenvs
run by your user, possibly concurrently, it could accumulate problems.
This could lead to hard-to-debug problems or wasted space. This function
detects a number of incorrect conditions, including nonexistent files that
are indexed, files that are indexed but in the wrong place, and, if you
request it, files whose content does not match the hash that is indexed.
Earlier versions of this function also returned a list of non-indexed
files; since v5.0 problems are reported solely through the exceptions
described below. Stray or inaccessible files that have accumulated in the
cache can be removed with :func:`clear_download_cache`, either by passing
one of the offending filenames (e.g. from the ``bad_files`` attribute of
`CacheDamaged`), or with no arguments to empty the entire cache and return
it to a reasonable, if empty, state.
Parameters
----------
pkgname : str, optional
The package name to use to locate the download cache, i.e., for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Raises
------
`~astropy.utils.data.CacheDamaged`
To indicate a problem with the cache contents; the exception contains
a ``.bad_files`` attribute containing a set of filenames to allow the
user to use :func:`clear_download_cache` to remove the offending items.
OSError, RuntimeError
To indicate some problem with the cache structure. This may need a full
:func:`clear_download_cache` to resolve, or may indicate some kind of
misconfiguration.
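Examples
--------
A sketch of checking the cache and clearing any entries it reports as
damaged::
>>> from astropy.utils.data import (check_download_cache,
...                                 clear_download_cache, CacheDamaged)
>>> try:  # doctest: +SKIP
...     check_download_cache()
... except CacheDamaged as exc:
...     for bad in exc.bad_files:
...         clear_download_cache(bad)
...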
"""
bad_files = set()
messages = set()
dldir = _get_download_cache_loc(pkgname=pkgname)
with os.scandir(dldir) as it:
for entry in it:
f = os.path.abspath(os.path.join(dldir, entry.name))
if entry.name.startswith("rmtree-"):
if f not in _tempfilestodel:
bad_files.add(f)
messages.add(f"Cache entry {entry.name} not scheduled for deletion")
elif entry.is_dir():
for sf in os.listdir(f):
if sf in ['url', 'contents']:
continue
sf = os.path.join(f, sf)
bad_files.add(sf)
messages.add(f"Unexpected file f{sf}")
urlf = os.path.join(f, "url")
url = None
if not os.path.isfile(urlf):
bad_files.add(urlf)
messages.add(f"Problem with URL file f{urlf}")
else:
url = get_file_contents(urlf, encoding="utf-8")
if not _is_url(url):
bad_files.add(f)
messages.add(f"Malformed URL: {url}")
else:
hashname = _url_to_dirname(url)
if entry.name != hashname:
bad_files.add(f)
messages.add(f"URL hashes to {hashname} but is stored in {entry.name}")
if not os.path.isfile(os.path.join(f, "contents")):
bad_files.add(f)
if url is None:
messages.add(f"Hash {entry.name} is missing contents")
else:
messages.add(f"URL {url} with hash {entry.name} is missing contents")
else:
bad_files.add(f)
messages.add(f"Left-over non-directory {f} in cache")
if bad_files:
raise CacheDamaged("\n".join(messages), bad_files=bad_files)
@contextlib.contextmanager
def _SafeTemporaryDirectory(suffix=None, prefix=None, dir=None):
"""Temporary directory context manager
This will not raise an exception if the temporary directory goes away
before it's supposed to be deleted. Specifically, what is deleted will
be the directory *name* produced; if no such directory exists, no
exception will be raised.
It would be safer to delete it only if it's really the same directory
- checked by file descriptor - and if it's still called the same thing.
But that opens a platform-specific can of worms.
It would also be more robust to use ExitStack and TemporaryDirectory,
which is more aggressive about removing readonly things.
"""
d = mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
try:
yield d
finally:
try:
shutil.rmtree(d)
except OSError:
pass
def _rmtree(path, replace=None):
"""More-atomic rmtree. Ignores missing directory."""
with TemporaryDirectory(prefix="rmtree-",
dir=os.path.dirname(os.path.abspath(path))) as d:
try:
os.rename(path, os.path.join(d, "to-zap"))
except FileNotFoundError:
pass
except PermissionError:
warn(CacheMissingWarning(
f"Unable to remove directory {path} because a file in it "
f"is in use and you are on Windows", path))
raise
if replace is not None:
try:
os.rename(replace, path)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
def import_file_to_cache(url_key, filename,
remove_original=False,
pkgname='astropy',
*,
replace=True):
"""Import the on-disk file specified by filename to the cache.
The provided ``url_key`` will be the name used in the cache. The file
should contain the contents of this URL, at least notionally (the URL may
be temporarily or permanently unavailable). It is using ``url_key`` that
users will request these contents from the cache. See :func:`download_file` for
details.
If ``url_key`` already exists in the cache, it will be updated to point to
these imported contents, and its old contents will be deleted from the
cache.
Parameters
----------
url_key : str
The key to index the file under. This should probably be
the URL where the file was located, though if you obtained
it from a mirror you should use the URL of the primary
location.
filename : str
The file whose contents you want to import.
remove_original : bool
Whether to remove the original file (``filename``) once import is
complete.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
replace : boolean, optional
Whether or not to replace an existing object in the cache, if one exists.
If replacement is not requested but the object exists, silently pass.
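Examples
--------
A minimal sketch; the URL key and file name below are purely
illustrative::
>>> from astropy.utils.data import import_file_to_cache
>>> cached_path = import_file_to_cache(
...     'https://data.example.org/catalog.fits',
...     'local_copy.fits')  # doctest: +SKIP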
"""
cache_dir = _get_download_cache_loc(pkgname=pkgname)
cache_dirname = _url_to_dirname(url_key)
local_dirname = os.path.join(cache_dir, cache_dirname)
local_filename = os.path.join(local_dirname, "contents")
with _SafeTemporaryDirectory(prefix="temp_dir", dir=cache_dir) as temp_dir:
temp_filename = os.path.join(temp_dir, "contents")
# Make sure we're on the same filesystem
# This will raise an exception if the url_key doesn't turn into a valid filename
shutil.copy(filename, temp_filename)
with open(os.path.join(temp_dir, "url"), "wt", encoding="utf-8") as f:
f.write(url_key)
if replace:
_rmtree(local_dirname, replace=temp_dir)
else:
try:
os.rename(temp_dir, local_dirname)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
if remove_original:
os.remove(filename)
return os.path.abspath(local_filename)
def get_cached_urls(pkgname='astropy'):
"""
Get the list of URLs in the cache. Especially useful for looking up what
files are stored in your cache when you don't have internet access.
The listed URLs are the keys programs should use to access the file
contents, but those contents may have actually been obtained from a mirror.
See `~download_file` for details.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
cached_urls : list
List of cached URLs.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
"""
return sorted(cache_contents(pkgname=pkgname).keys())
def cache_contents(pkgname='astropy'):
"""Obtain a dict mapping cached URLs to filenames.
This dictionary is a read-only snapshot of the state of the cache when this
function was called. If other processes are actively working with the
cache, it is possible for them to delete files that are listed in this
dictionary. Use with some caution if you are working on a system that is
busy with many running astropy processes, although the same issues apply to
most functions in this module.
"""
r = {}
try:
dldir = _get_download_cache_loc(pkgname=pkgname)
except OSError:
return _NOTHING
with os.scandir(dldir) as it:
for entry in it:
if entry.is_dir():
url = get_file_contents(os.path.join(dldir, entry.name, "url"), encoding="utf-8")
r[url] = os.path.abspath(os.path.join(dldir, entry.name, "contents"))
return ReadOnlyDict(r)
def export_download_cache(filename_or_obj, urls=None, overwrite=False, pkgname='astropy'):
"""Exports the cache contents as a ZIP file.
Parameters
----------
filename_or_obj : str or file-like
Where to put the created ZIP file. Must be something the zipfile
module can write to.
urls : iterable of str or None
The URLs to include in the exported cache. The default is all
URLs currently in the cache. If a URL is included in this list
but is not currently in the cache, a KeyError will be raised.
To ensure that all are in the cache use `~download_file`
or `~download_files_in_parallel`.
overwrite : bool, optional
If filename_or_obj is a filename that exists, it will only be
overwritten if this is True.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
import_download_cache : import the contents of such a ZIP file
import_file_to_cache : import a single file directly
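Examples
--------
A sketch of saving the current cache contents to a ZIP file (the file name
is illustrative)::
>>> from astropy.utils.data import export_download_cache
>>> export_download_cache('cache_snapshot.zip', overwrite=True)  # doctest: +SKIP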
"""
if urls is None:
urls = get_cached_urls(pkgname)
with zipfile.ZipFile(filename_or_obj, 'w' if overwrite else 'x') as z:
for u in urls:
fn = download_file(u, cache=True, sources=[], pkgname=pkgname)
# Do not use os.path.join because ZIP files want
# "/" on all platforms
z_fn = urllib.parse.quote(u, safe="")
z.write(fn, z_fn)
def import_download_cache(filename_or_obj, urls=None, update_cache=False, pkgname='astropy'):
"""Imports the contents of a ZIP file into the cache.
Each member of the ZIP file should be named by a quoted version of the
URL whose contents it stores. These names are decoded with
:func:`~urllib.parse.unquote`.
Parameters
----------
filename_or_obj : str or file-like
Where the stored ZIP file is. Must be something the :mod:`~zipfile`
module can read from.
urls : set of str or list of str or None
The URLs to import from the ZIP file. The default is all
URLs in the file.
update_cache : bool, optional
If True, any entry in the ZIP file will overwrite the value in the
cache; if False, leave untouched any entry already in the cache.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
export_download_cache : export the contents of the cache to such a ZIP file
import_file_to_cache : import a single file directly
"""
with zipfile.ZipFile(filename_or_obj, 'r') as z, TemporaryDirectory() as d:
for i, zf in enumerate(z.infolist()):
url = urllib.parse.unquote(zf.filename)
# FIXME(aarchiba): do we want some kind of validation on this URL?
# urllib.parse might do something sensible...but what URLs might
# they have?
# is_url in this file is probably a good check, not just here
# but throughout this file.
if urls is not None and url not in urls:
continue
if not update_cache and is_url_in_cache(url, pkgname=pkgname):
continue
f_temp_name = os.path.join(d, str(i))
with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
block = f_zip.read(conf.download_block_size)
while block:
f_temp.write(block)
block = f_zip.read(conf.download_block_size)
import_file_to_cache(url, f_temp_name,
remove_original=True,
pkgname=pkgname)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A "grab bag" of relatively small general-purpose utilities that don't have
a clear module/package to live in.
"""
import abc
import contextlib
import difflib
import inspect
import json
import os
import signal
import sys
import traceback
import unicodedata
import locale
import threading
import re
from contextlib import contextmanager
from collections import defaultdict, OrderedDict
from astropy.utils.decorators import deprecated
__all__ = ['isiterable', 'silence', 'format_exception', 'NumpyRNGContext',
'find_api_page', 'is_path_hidden', 'walk_skip_hidden',
'JsonCustomEncoder', 'indent', 'dtype_bytes_or_chars',
'OrderedDescriptor', 'OrderedDescriptorContainer']
# Because they are deprecated.
__doctest_skip__ = ['OrderedDescriptor', 'OrderedDescriptorContainer']
NOT_OVERWRITING_MSG = ('File {} already exists. If you mean to replace it '
'then use the argument "overwrite=True".')
# A useful regex for tests.
_NOT_OVERWRITING_MSG_MATCH = (r'File .* already exists\. If you mean to '
r'replace it then use the argument '
r'"overwrite=True"\.')
def isiterable(obj):
"""Returns `True` if the given object is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
def indent(s, shift=1, width=4):
"""Indent a block of text. The indentation is applied to each line."""
indented = '\n'.join(' ' * (width * shift) + l if l else ''
for l in s.splitlines())
if s[-1] == '\n':
indented += '\n'
return indented
class _DummyFile:
"""A noop writeable object."""
def write(self, s):
pass
@contextlib.contextmanager
def silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
yield
sys.stdout = old_stdout
sys.stderr = old_stderr
def format_exception(msg, *args, **kwargs):
"""
Given an exception message string, uses new-style formatting arguments
``{filename}``, ``{lineno}``, ``{func}`` and/or ``{text}`` to fill in
information about the exception that occurred. For example:
try:
1/0
except:
raise ZeroDivisionError(
format_exception('A divide by zero occurred in {filename} at '
'line {lineno} of function {func}.'))
Any additional positional or keyword arguments passed to this function are
also used to format the message.
.. note::
This uses `sys.exc_info` to gather up the information needed to fill
in the formatting arguments. Since `sys.exc_info` is not carried
outside a handled exception, it's not wise to use this
outside of an ``except`` clause - if it is, this will substitute
'<unknown>' for the 4 formatting arguments.
"""
tb = traceback.extract_tb(sys.exc_info()[2], limit=1)
if len(tb) > 0:
filename, lineno, func, text = tb[0]
else:
filename = lineno = func = text = '<unknown>'
return msg.format(*args, filename=filename, lineno=lineno, func=func,
text=text, **kwargs)
class NumpyRNGContext:
"""
A context manager (for use with the ``with`` statement) that will seed the
numpy random number generator (RNG) to a specific value, and then restore
the RNG state back to whatever it was before.
This is primarily intended for use in the astropy testing suite, but it
may be useful in ensuring reproducibility of Monte Carlo simulations in a
science context.
Parameters
----------
seed : int
The value to use to seed the numpy RNG
Examples
--------
A typical use case might be::
with NumpyRNGContext(<some seed value you pick>):
from numpy import random
randarr = random.randn(100)
... run your test using `randarr` ...
#Any code using numpy.random at this indent level will act just as it
#would have if it had been before the with statement - e.g. whatever
#the default seed is.
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
from numpy import random
self.startstate = random.get_state()
random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
from numpy import random
random.set_state(self.startstate)
def find_api_page(obj, version=None, openinbrowser=True, timeout=None):
"""
Determines the URL of the API page for the specified object, and
optionally opens that page in a web browser.
.. note::
You must be connected to the internet for this to function even if
``openinbrowser`` is `False`, unless you provide a local version of
the documentation to ``version`` (e.g., ``file:///path/to/docs``).
Parameters
----------
obj
The object to open the docs for or its fully-qualified name
(as a str).
version : str
The doc version - either a version number like '0.1', 'dev' for
the development/latest docs, or a URL to point to a specific
location that should be the *base* of the documentation. Defaults to
latest if you aren't on a release, otherwise, the version you
are on.
openinbrowser : bool
If `True`, the `webbrowser` package will be used to open the doc
page in a new web browser window.
timeout : number, optional
The number of seconds to wait before timing-out the query to
the astropy documentation. If not given, the default python
stdlib timeout will be used.
Returns
-------
url : str
The loaded URL
Raises
------
ValueError
If the documentation can't be found
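Examples
--------
A sketch (requires internet access; the fully-qualified name below is just
an example)::
>>> from astropy.utils.misc import find_api_page
>>> url = find_api_page('astropy.utils.misc.find_api_page',
...                     openinbrowser=False)  # doctest: +SKIP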
"""
import webbrowser
from zlib import decompress
from astropy.utils.data import get_readable_fileobj
if (not isinstance(obj, str) and
hasattr(obj, '__module__') and
hasattr(obj, '__name__')):
obj = obj.__module__ + '.' + obj.__name__
elif inspect.ismodule(obj):
obj = obj.__name__
if version is None:
from astropy import version
if version.release:
version = 'v' + version.version
else:
version = 'dev'
if '://' in version:
if version.endswith('index.html'):
baseurl = version[:-10]
elif version.endswith('/'):
baseurl = version
else:
baseurl = version + '/'
elif version == 'dev' or version == 'latest':
baseurl = 'http://devdocs.astropy.org/'
else:
baseurl = f'https://docs.astropy.org/en/{version}/'
# Custom request headers; see
# https://github.com/astropy/astropy/issues/8990
url = baseurl + 'objects.inv'
headers = {'User-Agent': f'Astropy/{version}'}
with get_readable_fileobj(url, encoding='binary', remote_timeout=timeout,
http_headers=headers) as uf:
oiread = uf.read()
# need to first read/remove the first four lines, which have info before
# the compressed section with the actual object inventory
idx = -1
headerlines = []
for _ in range(4):
oldidx = idx
idx = oiread.index(b'\n', oldidx + 1)
headerlines.append(oiread[(oldidx+1):idx].decode('utf-8'))
# intersphinx version line, project name, and project version
ivers, proj, vers, compr = headerlines
if 'The remainder of this file is compressed using zlib' not in compr:
raise ValueError('The file downloaded from {} does not seem to be '
'the usual Sphinx objects.inv format. Maybe it '
'has changed?'.format(baseurl + 'objects.inv'))
compressed = oiread[(idx+1):]
decompressed = decompress(compressed).decode('utf-8')
resurl = None
for l in decompressed.strip().splitlines():
ls = l.split()
name = ls[0]
loc = ls[3]
if loc.endswith('$'):
loc = loc[:-1] + name
if name == obj:
resurl = baseurl + loc
break
if resurl is None:
raise ValueError(f'Could not find the docs for the object {obj}')
elif openinbrowser:
webbrowser.open(resurl)
return resurl
def signal_number_to_name(signum):
"""
Given an OS signal number, returns a signal name. If the signal
number is unknown, returns ``'UNKNOWN'``.
"""
# Since these numbers and names are platform specific, we use the
# builtin signal module and build a reverse mapping.
signal_to_name_map = {k: v for v, k in signal.__dict__.items()
if v.startswith('SIG')}
return signal_to_name_map.get(signum, 'UNKNOWN')
if sys.platform == 'win32':
import ctypes
def _has_hidden_attribute(filepath):
"""
Returns True if the given filepath has the hidden attribute on
MS-Windows. Based on a post here:
https://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection
"""
if isinstance(filepath, bytes):
filepath = filepath.decode(sys.getfilesystemencoding())
try:
attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath)
result = bool(attrs & 2) and attrs != -1
except AttributeError:
result = False
return result
else:
def _has_hidden_attribute(filepath):
return False
def is_path_hidden(filepath):
"""
Determines if a given file or directory is hidden.
Parameters
----------
filepath : str
The path to a file or directory
Returns
-------
hidden : bool
Returns `True` if the file is hidden
"""
name = os.path.basename(os.path.abspath(filepath))
if isinstance(name, bytes):
is_dotted = name.startswith(b'.')
else:
is_dotted = name.startswith('.')
return is_dotted or _has_hidden_attribute(filepath)
def walk_skip_hidden(top, onerror=None, followlinks=False):
"""
A wrapper for `os.walk` that skips hidden files and directories.
This function does not have the parameter ``topdown`` from
`os.walk`: the directories must always be recursed top-down when
using this function.
See also
--------
os.walk : For a description of the parameters
"""
for root, dirs, files in os.walk(
top, topdown=True, onerror=onerror,
followlinks=followlinks):
# These lists must be updated in-place so os.walk will skip
# hidden directories
dirs[:] = [d for d in dirs if not is_path_hidden(d)]
files[:] = [f for f in files if not is_path_hidden(f)]
yield root, dirs, files
class JsonCustomEncoder(json.JSONEncoder):
"""Support for data types that JSON default encoder
does not do.
This includes:
* Numpy array or number
* Complex number
* Set
* Bytes
* astropy.UnitBase
* astropy.Quantity
Examples
--------
>>> import json
>>> import numpy as np
>>> from astropy.utils.misc import JsonCustomEncoder
>>> json.dumps(np.arange(3), cls=JsonCustomEncoder)
'[0, 1, 2]'
"""
def default(self, obj):
from astropy import units as u
import numpy as np
if isinstance(obj, u.Quantity):
return dict(value=obj.value, unit=obj.unit.to_string())
if isinstance(obj, (np.number, np.ndarray)):
return obj.tolist()
elif isinstance(obj, complex):
return [obj.real, obj.imag]
elif isinstance(obj, set):
return list(obj)
elif isinstance(obj, bytes): # pragma: py3
return obj.decode()
elif isinstance(obj, (u.UnitBase, u.FunctionUnitBase)):
if obj == u.dimensionless_unscaled:
obj = 'dimensionless_unit'
else:
return obj.to_string()
return json.JSONEncoder.default(self, obj)
def strip_accents(s):
"""
Remove accents from a Unicode string.
This helps with matching "ångström" to "angstrom", for example.
"""
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def did_you_mean(s, candidates, n=3, cutoff=0.8, fix=None):
"""
When a string isn't found in a set of candidates, we can be nice
and provide a list of alternatives in the exception. This
convenience function helps to format that part of the exception.
Parameters
----------
s : str
candidates : sequence of str or dict of str keys
n : int
The maximum number of results to include. See
`difflib.get_close_matches`.
cutoff : float
In the range [0, 1]. Possibilities that don't score at least
that similar to ``s`` are ignored. See
`difflib.get_close_matches`.
fix : callable
A callable to modify the results after matching. It should
take a single string and return a sequence of strings
containing the fixed matches.
Returns
-------
message : str
Returns the string "Did you mean X, Y, or Z?", or the empty
string if no alternatives were found.
"""
if isinstance(s, str):
s = strip_accents(s)
s_lower = s.lower()
# Create a mapping from the lower case name to all capitalization
# variants of that name.
candidates_lower = {}
for candidate in candidates:
candidate_lower = candidate.lower()
candidates_lower.setdefault(candidate_lower, [])
candidates_lower[candidate_lower].append(candidate)
# The heuristic here is to first try "singularizing" the word. If
# that doesn't match anything use difflib to find close matches in
# original, lower and upper case.
if s_lower.endswith('s') and s_lower[:-1] in candidates_lower:
matches = [s_lower[:-1]]
else:
matches = difflib.get_close_matches(
s_lower, candidates_lower, n=n, cutoff=cutoff)
if len(matches):
capitalized_matches = set()
for match in matches:
capitalized_matches.update(candidates_lower[match])
matches = capitalized_matches
if fix is not None:
mapped_matches = []
for match in matches:
mapped_matches.extend(fix(match))
matches = mapped_matches
matches = list(set(matches))
matches = sorted(matches)
if len(matches) == 1:
matches = matches[0]
else:
matches = (', '.join(matches[:-1]) + ' or ' +
matches[-1])
return f'Did you mean {matches}?'
return ''
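# Illustrative sketch (not part of the original module); the candidate names
# are made up. It demonstrates the message format and the "singularizing"
# heuristic described in the comments above.
def _example_did_you_mean():
    close_match = did_you_mean('celcius', ['celsius', 'kelvin', 'fahrenheit'])
    # -> 'Did you mean celsius?'
    plural_match = did_you_mean('kelvins', ['kelvin', 'celsius'])
    # -> 'Did you mean kelvin?' (matched by stripping the trailing 's')
    return close_match, plural_match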
_ordered_descriptor_deprecation_message = """\
The {func} {obj_type} is deprecated and may be removed in a future version.
You can replace its functionality with a combination of the
__init_subclass__ and __set_name__ magic methods introduced in Python 3.6.
See https://github.com/astropy/astropy/issues/11094 for recipes on how to
replicate their functionality.
"""
@deprecated('4.3', _ordered_descriptor_deprecation_message)
class OrderedDescriptor(metaclass=abc.ABCMeta):
"""
Base class for descriptors whose order in the class body should be
preserved. Intended for use in concert with the
`OrderedDescriptorContainer` metaclass.
Subclasses of `OrderedDescriptor` must define a value for a class attribute
called ``_class_attribute_``. This is the name of a class attribute on the
*container* class for these descriptors, which will be set to an
`~collections.OrderedDict` at class creation time. This
`~collections.OrderedDict` will contain a mapping of all class attributes
that were assigned instances of the `OrderedDescriptor` subclass, to the
instances themselves. See the documentation for
`OrderedDescriptorContainer` for a concrete example.
Optionally, subclasses of `OrderedDescriptor` may define a value for a
class attribute called ``_name_attribute_``. This should be the name of
an attribute on instances of the subclass. When specified, during
creation of a class containing these descriptors, the name attribute on
each instance will be set to the name of the class attribute it was
assigned to on the class.
.. note::
Although this class is intended for use with *descriptors* (i.e.
classes that define any of the ``__get__``, ``__set__``, or
``__delete__`` magic methods), this base class is not itself a
descriptor, and technically this could be used for classes that are
not descriptors too. However, use with descriptors is the original
intended purpose.
"""
# This id increments for each OrderedDescriptor instance created, so they
# are always ordered in the order they were created. Class bodies are
# guaranteed to be executed from top to bottom. Not sure if this is
# thread-safe though.
_nextid = 1
@property
@abc.abstractmethod
def _class_attribute_(self):
"""
Subclasses should define this attribute to the name of an attribute on
classes containing this subclass. That attribute will contain the mapping
of all instances of that `OrderedDescriptor` subclass defined in the class
body. If the same descriptor needs to be used with different classes,
each with different names of this attribute, multiple subclasses will be
needed.
"""
_name_attribute_ = None
"""
Subclasses may optionally define this attribute to specify the name of an
attribute on instances of the class that should be filled with the
instance's attribute name at class creation time.
"""
def __init__(self, *args, **kwargs):
# The _nextid attribute is shared across all subclasses so that
# different subclasses of OrderedDescriptors can be sorted correctly
# between themselves
self.__order = OrderedDescriptor._nextid
OrderedDescriptor._nextid += 1
super().__init__()
def __lt__(self, other):
"""
Defined for convenient sorting of `OrderedDescriptor` instances, which
are defined to sort in their creation order.
"""
if (isinstance(self, OrderedDescriptor) and
isinstance(other, OrderedDescriptor)):
try:
return self.__order < other.__order
except AttributeError:
raise RuntimeError(
'Could not determine ordering for {} and {}; at least '
'one of them is not calling super().__init__ in its '
'__init__.'.format(self, other))
else:
return NotImplemented
@deprecated('4.3', _ordered_descriptor_deprecation_message)
class OrderedDescriptorContainer(type):
"""
Classes should use this metaclass if they wish to use `OrderedDescriptor`
attributes, which are class attributes that "remember" the order in which
they were defined in the class body.
Every subclass of `OrderedDescriptor` has an attribute called
``_class_attribute_``. For example, if we have
.. code:: python
class ExampleDecorator(OrderedDescriptor):
_class_attribute_ = '_examples_'
Then when a class with the `OrderedDescriptorContainer` metaclass is
created, it will automatically be assigned a class attribute ``_examples_``
referencing an `~collections.OrderedDict` containing all instances of
``ExampleDecorator`` defined in the class body, mapped to by the names of
the attributes they were assigned to.
When subclassing a class with this metaclass, the descriptor dict (i.e.
``_examples_`` in the above example) will *not* contain descriptors
inherited from the base class. That is, this only works by default with
    descriptors explicitly defined in the class body. However, the subclass
    *may* define an attribute ``_inherit_descriptors_`` which lists
`OrderedDescriptor` classes that *should* be added from base classes.
See the examples section below for an example of this.
Examples
--------
>>> from astropy.utils import OrderedDescriptor, OrderedDescriptorContainer
>>> class TypedAttribute(OrderedDescriptor):
... \"\"\"
... Attributes that may only be assigned objects of a specific type,
... or subclasses thereof. For some reason we care about their order.
... \"\"\"
...
... _class_attribute_ = 'typed_attributes'
... _name_attribute_ = 'name'
... # A default name so that instances not attached to a class can
... # still be repr'd; useful for debugging
... name = '<unbound>'
...
... def __init__(self, type):
... # Make sure not to forget to call the super __init__
... super().__init__()
... self.type = type
...
... def __get__(self, obj, objtype=None):
... if obj is None:
... return self
... if self.name in obj.__dict__:
... return obj.__dict__[self.name]
... else:
... raise AttributeError(self.name)
...
... def __set__(self, obj, value):
... if not isinstance(value, self.type):
... raise ValueError('{0}.{1} must be of type {2!r}'.format(
... obj.__class__.__name__, self.name, self.type))
... obj.__dict__[self.name] = value
...
... def __delete__(self, obj):
... if self.name in obj.__dict__:
... del obj.__dict__[self.name]
... else:
... raise AttributeError(self.name)
...
... def __repr__(self):
... if isinstance(self.type, tuple) and len(self.type) > 1:
... typestr = '({0})'.format(
... ', '.join(t.__name__ for t in self.type))
... else:
... typestr = self.type.__name__
... return '<{0}(name={1}, type={2})>'.format(
... self.__class__.__name__, self.name, typestr)
...
Now let's create an example class that uses this ``TypedAttribute``::
>>> class Point2D(metaclass=OrderedDescriptorContainer):
... x = TypedAttribute((float, int))
... y = TypedAttribute((float, int))
...
... def __init__(self, x, y):
... self.x, self.y = x, y
...
>>> p1 = Point2D(1.0, 2.0)
>>> p1.x
1.0
>>> p1.y
2.0
>>> p2 = Point2D('a', 'b') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
    ValueError: Point2D.x must be of type (float, int)
We see that ``TypedAttribute`` works more or less as advertised, but
there's nothing special about that. Let's see what
`OrderedDescriptorContainer` did for us::
>>> Point2D.typed_attributes
OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>),
('y', <TypedAttribute(name=y, type=(float, int))>)])
If we create a subclass, it does *not* by default add inherited descriptors
to ``typed_attributes``::
>>> class Point3D(Point2D):
... z = TypedAttribute((float, int))
...
>>> Point3D.typed_attributes
OrderedDict([('z', <TypedAttribute(name=z, type=(float, int))>)])
However, if we specify ``_inherit_descriptors_`` from ``Point2D`` then
it will do so::
>>> class Point3D(Point2D):
... _inherit_descriptors_ = (TypedAttribute,)
... z = TypedAttribute((float, int))
...
>>> Point3D.typed_attributes
OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>),
('y', <TypedAttribute(name=y, type=(float, int))>),
('z', <TypedAttribute(name=z, type=(float, int))>)])
.. note::
Hopefully it is clear from these examples that this construction
also allows a class of type `OrderedDescriptorContainer` to use
multiple different `OrderedDescriptor` classes simultaneously.
"""
_inherit_descriptors_ = ()
def __init__(cls, cls_name, bases, members):
descriptors = defaultdict(list)
seen = set()
inherit_descriptors = ()
descr_bases = {}
for mro_cls in cls.__mro__:
for name, obj in mro_cls.__dict__.items():
if name in seen:
# Checks if we've already seen an attribute of the given
# name (if so it will override anything of the same name in
# any base class)
continue
seen.add(name)
if (not isinstance(obj, OrderedDescriptor) or
(inherit_descriptors and
not isinstance(obj, inherit_descriptors))):
# The second condition applies when checking any
# subclasses, to see if we can inherit any descriptors of
# the given type from subclasses (by default inheritance is
# disabled unless the class has _inherit_descriptors_
# defined)
continue
if obj._name_attribute_ is not None:
setattr(obj, obj._name_attribute_, name)
# Don't just use the descriptor's class directly; instead go
# through its MRO and find the class on which _class_attribute_
# is defined directly. This way subclasses of some
# OrderedDescriptor *may* override _class_attribute_ and have
# its own _class_attribute_, but by default all subclasses of
# some OrderedDescriptor are still grouped together
# TODO: It might be worth clarifying this in the docs
if obj.__class__ not in descr_bases:
for obj_cls_base in obj.__class__.__mro__:
if '_class_attribute_' in obj_cls_base.__dict__:
descr_bases[obj.__class__] = obj_cls_base
descriptors[obj_cls_base].append((obj, name))
break
else:
# Make sure to put obj first for sorting purposes
obj_cls_base = descr_bases[obj.__class__]
descriptors[obj_cls_base].append((obj, name))
if not getattr(mro_cls, '_inherit_descriptors_', False):
# If _inherit_descriptors_ is undefined then we don't inherit
# any OrderedDescriptors from any of the base classes, and
# there's no reason to continue through the MRO
break
else:
inherit_descriptors = mro_cls._inherit_descriptors_
for descriptor_cls, instances in descriptors.items():
instances.sort()
instances = OrderedDict((key, value) for value, key in instances)
setattr(cls, descriptor_cls._class_attribute_, instances)
super().__init__(cls_name, bases, members)
LOCALE_LOCK = threading.Lock()
@contextmanager
def _set_locale(name):
"""
Context manager to temporarily set the locale to ``name``.
An example is setting locale to "C" so that the C strtod()
function will use "." as the decimal point to enable consistent
numerical string parsing.
    Note that one cannot nest multiple _set_locale() context manager
    statements, as this would deadlock on the (non-reentrant) threading lock.
This code taken from https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale.
    Parameters
    ----------
name : str
Locale name, e.g. "C" or "fr_FR".
"""
name = str(name)
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
if saved == name:
# Don't do anything if locale is already the requested locale
yield
else:
try:
locale.setlocale(locale.LC_ALL, name)
yield
finally:
locale.setlocale(locale.LC_ALL, saved)
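# Illustrative sketch (not part of the original module) of the intended use of
# `_set_locale`: forcing the "C" locale so that locale-aware formatting and
# parsing use '.' as the decimal point regardless of the user's locale.
def _example_set_locale_c():
    with _set_locale('C'):
        return locale.format_string('%.2f', 1.25)  # always '1.25'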
set_locale = deprecated('4.0')(_set_locale)
set_locale.__doc__ = """Deprecated version of :func:`_set_locale` above.
See https://github.com/astropy/astropy/issues/9196
"""
def dtype_bytes_or_chars(dtype):
"""
Parse the number out of a dtype.str value like '<U5' or '<f8'.
See #5819 for discussion on the need for this function for getting
the number of characters corresponding to a string dtype.
Parameters
----------
dtype : numpy dtype object
Input dtype
Returns
-------
bytes_or_chars : int or None
        Bytes (for numeric types) or characters (for string types)
"""
match = re.search(r'(\d+)$', dtype.str)
out = int(match.group(1)) if match else None
return out
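# Illustrative sketch (not part of the original module) covering the two cases
# handled by `dtype_bytes_or_chars`.
def _example_dtype_bytes_or_chars():
    import numpy as np
    assert dtype_bytes_or_chars(np.dtype('<f8')) == 8  # 8 bytes for float64
    assert dtype_bytes_or_chars(np.dtype('<U5')) == 5  # 5 characters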
def _hungry_for(option): # pragma: no cover
"""
Open browser loaded with ``option`` options near you.
*Disclaimers: Payments not included. Astropy is not
responsible for any liability from using this function.*
.. note:: Accuracy depends on your browser settings.
"""
import webbrowser
webbrowser.open(f'https://www.google.com/search?q={option}+near+me')
def pizza(): # pragma: no cover
"""``/pizza``"""
_hungry_for('pizza')
def coffee(is_adam=False, is_brigitta=False): # pragma: no cover
"""``/coffee``"""
if is_adam and is_brigitta:
raise ValueError('There can be only one!')
if is_adam:
option = 'fresh+third+wave+coffee'
elif is_brigitta:
option = 'decent+espresso'
else:
option = 'coffee'
_hungry_for(option)
|
eef8dbbca4d3ff9591503be63fb5229b6218a95b3a23d760128f1de592566ff9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__doctest_skip__ = ['quantity_support']
def quantity_support(format='latex_inline'):
"""
Enable support for plotting `astropy.units.Quantity` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.quantity_support():
... plt.figure()
... plt.plot([1, 2, 3] * u.m)
[...]
... plt.plot([101, 125, 150] * u.cm)
[...]
... plt.draw()
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to ``latex_inline``.
"""
from astropy import units as u
# import Angle just so we have a more or less complete list of Quantity
# subclasses loaded - matplotlib needs them all separately!
# NOTE: in matplotlib >=3.2, subclasses will be recognized automatically,
# and once that becomes our minimum version, we can remove this,
# adding just u.Quantity itself to the registry.
from astropy.coordinates import Angle # noqa
from matplotlib import units
from matplotlib import ticker
# Get all subclass for Quantity, since matplotlib checks on class,
# not subclass.
def all_issubclass(cls):
return {cls}.union(
[s for c in cls.__subclasses__() for s in all_issubclass(c)])
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return 'π/2'
elif n == 2:
return 'π'
elif n % 2 == 0:
return f'{n // 2}π'
else:
return f'{n}π/2'
class MplQuantityConverter(units.ConversionInterface):
_all_issubclass_quantity = all_issubclass(u.Quantity)
def __init__(self):
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = {}
for cls in self._all_issubclass_quantity:
self._original_converter[cls] = units.registry.get(cls)
units.registry[cls] = self
@staticmethod
def axisinfo(unit, axis):
if unit == u.radian:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.to_string(),
)
elif unit == u.degree:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter('%i°'),
label=unit.to_string(),
)
elif unit is not None:
return units.AxisInfo(label=unit.to_string(format))
return None
@staticmethod
def convert(val, unit, axis):
if isinstance(val, u.Quantity):
return val.to_value(unit)
elif isinstance(val, list) and val and isinstance(val[0], u.Quantity):
return [v.to_value(unit) for v in val]
else:
return val
@staticmethod
def default_units(x, axis):
if hasattr(x, 'unit'):
return x.unit
return None
def __enter__(self):
return self
def __exit__(self, type, value, tb):
for cls in self._all_issubclass_quantity:
if self._original_converter[cls] is None:
del units.registry[cls]
else:
units.registry[cls] = self._original_converter[cls]
return MplQuantityConverter()
|
d6176d0d3e938b40bd06fb85a83be8c8cea48f0a25dc65e2412e21cd62d3b6ff | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from datetime import datetime
from astropy.time import Time
from astropy import units as u
__all__ = ['time_support']
__doctest_requires__ = {'time_support': ['matplotlib']}
UNSUPPORTED_FORMATS = ('datetime', 'datetime64')
YMDHMS_FORMATS = ('fits', 'iso', 'isot', 'yday')
STR_FORMATS = YMDHMS_FORMATS + ('byear_str', 'jyear_str')
def time_support(*, scale=None, format=None, simplify=True):
"""
Enable support for plotting `astropy.time.Time` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
    >>> from astropy import visualization
    >>> from astropy.time import Time
>>> with visualization.time_support(): # doctest: +IGNORE_OUTPUT
... plt.figure()
... plt.plot(Time(['2016-03-22T12:30:31', '2016-03-22T12:30:38', '2016-03-22T12:34:40']))
... plt.draw()
Parameters
----------
scale : str, optional
The time scale to use for the times on the axis. If not specified,
the scale of the first Time object passed to Matplotlib is used.
format : str, optional
The time format to use for the times on the axis. If not specified,
the format of the first Time object passed to Matplotlib is used.
simplify : bool, optional
If possible, simplify labels, e.g. by removing 00:00:00.000 times from
ISO strings if all labels fall on that time.
"""
import matplotlib.units as units
from matplotlib.ticker import MaxNLocator, ScalarFormatter
from astropy.visualization.wcsaxes.utils import select_step_hour, select_step_scalar
class AstropyTimeLocator(MaxNLocator):
        # Note: we subclass MaxNLocator because its default tick placement
        # already works well for many time formats.
def __init__(self, converter, *args, **kwargs):
kwargs['nbins'] = 4
super().__init__(*args, **kwargs)
self._converter = converter
def tick_values(self, vmin, vmax):
# Where we put the ticks depends on the format we are using
if self._converter.format in YMDHMS_FORMATS:
# If we are here, we need to check what the range of values
# is and decide how to find tick locations accordingly
vrange = vmax - vmin
if (self._converter.format != 'yday' and vrange > 31) or vrange > 366: # greater than a month
# We need to be careful here since not all years and months have
# the same length
# Start off by converting the values from the range to
# datetime objects, so that we can easily extract the year and
# month.
tmin = Time(vmin, scale=self._converter.scale, format='mjd').datetime
tmax = Time(vmax, scale=self._converter.scale, format='mjd').datetime
# Find the range of years
ymin = tmin.year
ymax = tmax.year
if ymax > ymin + 1: # greater than a year
# Find the step we want to use
ystep = int(select_step_scalar(max(1, (ymax - ymin) / 3)))
ymin = ystep * (ymin // ystep)
# Generate the years for these steps
times = []
for year in range(ymin, ymax + 1, ystep):
times.append(datetime(year=year, month=1, day=1))
else: # greater than a month but less than a year
mmin = tmin.month
mmax = tmax.month + 12 * (ymax - ymin)
mstep = int(select_step_scalar(max(1, (mmax - mmin) / 3)))
mmin = mstep * max(1, mmin // mstep)
# Generate the months for these steps
times = []
for month in range(mmin, mmax + 1, mstep):
times.append(datetime(year=ymin + (month - 1) // 12,
month=(month - 1) % 12 + 1,
day=1))
# Convert back to MJD
values = Time(times, scale=self._converter.scale).mjd
elif vrange > 1: # greater than a day
self.set_params(steps=[1, 2, 5, 10])
values = super().tick_values(vmin, vmax)
else:
# Determine ideal step
dv = (vmax - vmin) / 3 * 24 << u.hourangle
# And round to nearest sensible value
dv = select_step_hour(dv).to_value(u.hourangle) / 24
# Determine tick locations
imin = np.ceil(vmin / dv)
imax = np.floor(vmax / dv)
values = np.arange(imin, imax + 1, dtype=np.int64) * dv
else:
values = super().tick_values(vmin, vmax)
# Get rid of values outside of the input interval
values = values[(values >= vmin) & (values <= vmax)]
return values
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
class AstropyTimeFormatter(ScalarFormatter):
def __init__(self, converter, *args, **kwargs):
super().__init__(*args, **kwargs)
self._converter = converter
self.set_useOffset(False)
self.set_scientific(False)
def __call__(self, value, pos=None):
# Needed for Matplotlib <3.1
if self._converter.format in STR_FORMATS:
return self.format_ticks([value])[0]
else:
return super().__call__(value, pos=pos)
def format_ticks(self, values):
if len(values) == 0:
return []
if self._converter.format in YMDHMS_FORMATS:
times = Time(values, format='mjd', scale=self._converter.scale)
formatted = getattr(times, self._converter.format)
if self._converter.simplify:
if self._converter.format in ('fits', 'iso', 'isot'):
if all([x.endswith('00:00:00.000') for x in formatted]):
split = ' ' if self._converter.format == 'iso' else 'T'
formatted = [x.split(split)[0] for x in formatted]
elif self._converter.format == 'yday':
if all([x.endswith(':001:00:00:00.000') for x in formatted]):
formatted = [x.split(':', 1)[0] for x in formatted]
return formatted
elif self._converter.format == 'byear_str':
return Time(values, format='byear', scale=self._converter.scale).byear_str
elif self._converter.format == 'jyear_str':
return Time(values, format='jyear', scale=self._converter.scale).jyear_str
else:
return super().format_ticks(values)
class MplTimeConverter(units.ConversionInterface):
def __init__(self, scale=None, format=None, simplify=None):
super().__init__()
self.format = format
self.scale = scale
self.simplify = simplify
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = units.registry.get(Time)
units.registry[Time] = self
@property
def format(self):
return self._format
@format.setter
def format(self, value):
if value in UNSUPPORTED_FORMATS:
raise ValueError(f'time_support does not support format={value}')
self._format = value
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._original_converter is None:
del units.registry[Time]
else:
units.registry[Time] = self._original_converter
def default_units(self, x, axis):
if isinstance(x, tuple):
x = x[0]
if self.format is None:
self.format = x.format
if self.scale is None:
self.scale = x.scale
return 'astropy_time'
def convert(self, value, unit, axis):
"""
Convert a Time value to a scalar or array.
"""
scaled = getattr(value, self.scale)
if self.format in YMDHMS_FORMATS:
return scaled.mjd
elif self.format == 'byear_str':
return scaled.byear
elif self.format == 'jyear_str':
return scaled.jyear
else:
return getattr(scaled, self.format)
def axisinfo(self, unit, axis):
"""
Return major and minor tick locators and formatters.
"""
majloc = AstropyTimeLocator(self)
majfmt = AstropyTimeFormatter(self)
return units.AxisInfo(majfmt=majfmt,
majloc=majloc,
label=f'Time ({self.scale})')
return MplTimeConverter(scale=scale, format=format, simplify=simplify)
|
31b7fe928f6335ccd03fcd3f6323ecd148db3e6255b0e1a6d307137dec81215f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""
import numpy as np
from .decorators import support_nddata
__all__ = ['reshape_as_blocks', 'block_reduce', 'block_replicate']
def _process_block_inputs(data, block_size):
data = np.asanyarray(data)
block_size = np.atleast_1d(block_size)
if np.any(block_size <= 0):
raise ValueError('block_size elements must be strictly positive')
if data.ndim > 1 and len(block_size) == 1:
block_size = np.repeat(block_size, data.ndim)
if len(block_size) != data.ndim:
raise ValueError('block_size must be a scalar or have the same '
'length as the number of data dimensions')
block_size_int = block_size.astype(int)
if np.any(block_size_int != block_size): # e.g., 2.0 is OK, 2.1 is not
raise ValueError('block_size elements must be integers')
return data, block_size_int
def reshape_as_blocks(data, block_size):
"""
Reshape a data array into blocks.
This is useful to efficiently apply functions on block subsets of
the data instead of using loops. The reshaped array is a view of
the input data array.
.. versionadded:: 4.1
Parameters
----------
data : ndarray
The input data array.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis. Each dimension
of ``block_size`` must divide evenly into the corresponding
dimension of ``data``.
Returns
-------
output : ndarray
The reshaped array as a view of the input ``data`` array.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import reshape_as_blocks
>>> data = np.arange(16).reshape(4, 4)
>>> data
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> reshape_as_blocks(data, (2, 2))
array([[[[ 0, 1],
[ 4, 5]],
[[ 2, 3],
[ 6, 7]]],
[[[ 8, 9],
[12, 13]],
[[10, 11],
[14, 15]]]])
"""
data, block_size = _process_block_inputs(data, block_size)
if np.any(np.mod(data.shape, block_size) != 0):
raise ValueError('Each dimension of block_size must divide evenly '
'into the corresponding dimension of data')
nblocks = np.array(data.shape) // block_size
new_shape = tuple(k for ij in zip(nblocks, block_size) for k in ij)
nblocks_idx = tuple(range(0, len(new_shape), 2)) # even indices
block_idx = tuple(range(1, len(new_shape), 2)) # odd indices
return data.reshape(new_shape).transpose(nblocks_idx + block_idx)
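# Illustrative sketch (not part of the original module) of the use case noted
# in the docstring above: applying a reduction over each block without loops
# (here a 2x2 block mean, equivalent to ``block_reduce(data, 2, func=np.mean)``).
def _example_block_mean():
    data = np.arange(16).reshape(4, 4)
    blocks = reshape_as_blocks(data, (2, 2))
    return blocks.mean(axis=(2, 3))  # -> [[2.5, 4.5], [10.5, 12.5]]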
@support_nddata
def block_reduce(data, block_size, func=np.sum):
"""
Downsample a data array by applying a function to local blocks.
If ``data`` is not perfectly divisible by ``block_size`` along a
given axis then the data will be trimmed (from the end) along that
axis.
Parameters
----------
data : array-like
The data to be resampled.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.
func : callable, optional
The method to use to downsample the data. Must be a callable
that takes in a 4D `~numpy.ndarray` (the 2D `~numpy.ndarray`
input into `block_reduce` gets reshaped as 4D) and has an
``axis`` keyword that accepts tuples. This function will be
called with ``axis=(2, 3)`` and it should return a 2D array. The
default is `~numpy.sum`, which provides block summation (and
conserves the data sum).
Returns
-------
output : array-like
The resampled data.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import block_reduce
>>> data = np.arange(16).reshape(4, 4)
>>> block_reduce(data, 2) # doctest: +FLOAT_CMP
array([[10, 18],
[42, 50]])
>>> block_reduce(data, 2, func=np.mean) # doctest: +FLOAT_CMP
array([[ 2.5, 4.5],
[ 10.5, 12.5]])
"""
data, block_size = _process_block_inputs(data, block_size)
nblocks = np.array(data.shape) // block_size
size_init = nblocks * block_size # evenly-divisible size
# trim data if necessary
for axis in range(data.ndim):
if data.shape[axis] != size_init[axis]:
data = data.swapaxes(0, axis)
data = data[:size_init[axis]]
data = data.swapaxes(0, axis)
reshaped = reshape_as_blocks(data, block_size)
axis = tuple(range(data.ndim, reshaped.ndim))
return func(reshaped, axis=axis)
@support_nddata
def block_replicate(data, block_size, conserve_sum=True):
"""
Upsample a data array by block replication.
Parameters
----------
data : array-like
The data to be block replicated.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.
conserve_sum : bool, optional
If `True` (the default) then the sum of the output
block-replicated data will equal the sum of the input ``data``.
Returns
-------
output : array-like
The block-replicated data.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import block_replicate
>>> data = np.array([[0., 1.], [2., 3.]])
>>> block_replicate(data, 2) # doctest: +FLOAT_CMP
array([[0. , 0. , 0.25, 0.25],
[0. , 0. , 0.25, 0.25],
[0.5 , 0.5 , 0.75, 0.75],
[0.5 , 0.5 , 0.75, 0.75]])
>>> block_replicate(data, 2, conserve_sum=False) # doctest: +FLOAT_CMP
array([[0., 0., 1., 1.],
[0., 0., 1., 1.],
[2., 2., 3., 3.],
[2., 2., 3., 3.]])
"""
data, block_size = _process_block_inputs(data, block_size)
for i in range(data.ndim):
data = np.repeat(data, block_size[i], axis=i)
if conserve_sum:
data = data / float(np.prod(block_size))
return data
|
6831ea9108bf990e01cabafc9d87ad54fe09aaa28244042c571550f4fb2a3b0b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module implements the base CCDData class."""
import itertools
import numpy as np
from .compat import NDDataArray
from .nduncertainty import (
StdDevUncertainty, NDUncertainty, VarianceUncertainty, InverseVariance)
from astropy.io import fits, registry
from astropy import units as u
from astropy import log
from astropy.wcs import WCS
from astropy.utils.decorators import sharedmethod
__all__ = ['CCDData', 'fits_ccddata_reader', 'fits_ccddata_writer']
_known_uncertainties = (StdDevUncertainty, VarianceUncertainty, InverseVariance)
_unc_name_to_cls = {cls.__name__: cls for cls in _known_uncertainties}
_unc_cls_to_name = {cls: cls.__name__ for cls in _known_uncertainties}
# Global value which can turn on/off the unit requirements when creating a
# CCDData. Should be used with care because several functions actually break
# if the unit is None!
_config_ccd_requires_unit = True
def _arithmetic(op):
"""Decorator factory which temporarily disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand,
operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = f"See `astropy.nddata.NDArithmeticMixin.{func.__name__}`."
return sharedmethod(inner)
return decorator
def _uncertainty_unit_equivalent_to_parent(uncertainty_type, unit, parent_unit):
if uncertainty_type is StdDevUncertainty:
return unit == parent_unit
elif uncertainty_type is VarianceUncertainty:
return unit == (parent_unit ** 2)
elif uncertainty_type is InverseVariance:
return unit == (1 / (parent_unit ** 2))
raise ValueError(f"unsupported uncertainty type: {uncertainty_type}")
class CCDData(NDDataArray):
"""A class describing basic CCD data.
The CCDData class is based on the NDData object and includes a data array,
uncertainty frame, mask frame, flag frame, meta data, units, and WCS
information for a single CCD image.
Parameters
----------
data : `~astropy.nddata.CCDData`-like or array-like
The actual data contained in this `~astropy.nddata.CCDData` object.
Note that the data will always be saved by *reference*, so you should
make a copy of the ``data`` before passing it in if that's the desired
behavior.
uncertainty : `~astropy.nddata.StdDevUncertainty`, \
`~astropy.nddata.VarianceUncertainty`, \
`~astropy.nddata.InverseVariance`, `numpy.ndarray` or \
None, optional
        Uncertainties on the data. If the uncertainty is a `numpy.ndarray`, it
        is assumed to be, and stored as, a `~astropy.nddata.StdDevUncertainty`.
Default is ``None``.
mask : `numpy.ndarray` or None, optional
Mask for the data, given as a boolean Numpy array with a shape
matching that of the data. The values must be `False` where
the data is *valid* and `True` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
        ``mask`` here will cause the mask from the masked array to be
ignored.
Default is ``None``.
flags : `numpy.ndarray` or `~astropy.nddata.FlagCollection` or None, \
optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
Default is ``None``.
wcs : `~astropy.wcs.WCS` or None, optional
WCS-object containing the world coordinate system for the data.
Default is ``None``.
meta : dict-like object or None, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object, e.g. creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.Unit` or str, optional
The units of the data.
Default is ``None``.
.. warning::
            If the unit is ``None`` or not otherwise specified, a
            ``ValueError`` will be raised.
Raises
------
ValueError
If the ``uncertainty`` or ``mask`` inputs cannot be broadcast (e.g.,
match shape) onto ``data``.
Methods
-------
read(\\*args, \\**kwargs)
        ``Classmethod`` to create a CCDData instance based on a ``FITS`` file.
This method uses :func:`fits_ccddata_reader` with the provided
parameters.
write(\\*args, \\**kwargs)
Writes the contents of the CCDData instance into a new ``FITS`` file.
This method uses :func:`fits_ccddata_writer` with the provided
parameters.
Attributes
----------
known_invalid_fits_unit_strings
A dictionary that maps commonly-used fits unit name strings that are
technically invalid to the correct valid unit type (or unit string).
This is primarily for variant names like "ELECTRONS/S" which are not
formally valid, but are unambiguous and frequently enough encountered
that it is convenient to map them to the correct unit.
Notes
-----
`~astropy.nddata.CCDData` objects can be easily converted to a regular
Numpy array using `numpy.asarray`.
For example::
>>> from astropy.nddata import CCDData
>>> import numpy as np
>>> x = CCDData([1,2,3], unit='adu')
>>> np.asarray(x)
array([1, 2, 3])
This is useful, for example, when plotting a 2D image using
matplotlib.
>>> from astropy.nddata import CCDData
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> x = CCDData([[1,2,3], [4,5,6]], unit='adu')
>>> plt.imshow(x) # doctest: +SKIP
"""
def __init__(self, *args, **kwd):
if 'meta' not in kwd:
kwd['meta'] = kwd.pop('header', None)
if 'header' in kwd:
raise ValueError("can't have both header and meta.")
super().__init__(*args, **kwd)
if self._wcs is not None:
llwcs = self._wcs.low_level_wcs
if not isinstance(llwcs, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = llwcs
# Check if a unit is set. This can be temporarily disabled by the
# _CCDDataUnit contextmanager.
if _config_ccd_requires_unit and self.unit is None:
raise ValueError("a unit for CCDData must be specified.")
def _slice_wcs(self, item):
"""
Override the WCS slicing behaviour so that the wcs attribute continues
to be an `astropy.wcs.WCS`.
"""
if self.wcs is None:
return None
try:
return self.wcs[item]
except Exception as err:
self._handle_wcs_slicing_error(err, item)
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def wcs(self):
return self._wcs
@wcs.setter
def wcs(self, value):
if value is not None and not isinstance(value, WCS):
raise TypeError("the wcs must be a WCS instance.")
self._wcs = value
@property
def unit(self):
return self._unit
@unit.setter
def unit(self, value):
self._unit = u.Unit(value)
@property
def header(self):
return self._meta
@header.setter
def header(self, value):
self.meta = value
@property
def uncertainty(self):
return self._uncertainty
@uncertainty.setter
def uncertainty(self, value):
if value is not None:
if isinstance(value, NDUncertainty):
if getattr(value, '_parent_nddata', None) is not None:
value = value.__class__(value, copy=False)
self._uncertainty = value
elif isinstance(value, np.ndarray):
if value.shape != self.shape:
raise ValueError("uncertainty must have same shape as "
"data.")
self._uncertainty = StdDevUncertainty(value)
log.info("array provided for uncertainty; assuming it is a "
"StdDevUncertainty.")
else:
raise TypeError("uncertainty must be an instance of a "
"NDUncertainty object or a numpy array.")
self._uncertainty.parent_nddata = self
else:
self._uncertainty = value
def to_hdu(self, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, wcs_relax=True,
key_uncertainty_type='UTYPE', as_image_hdu=False):
"""Creates an HDUList object from a CCDData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
            If it is a string, append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
            ``CTYPE`` values ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` require
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead
of the default `~astropy.io.fits.PrimaryHDU`.
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
            - If ``self.uncertainty`` is set but not an astropy uncertainty type.
            - If ``self.uncertainty`` is set but has a different unit than
              ``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
if isinstance(self.header, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.header.copy()
else:
# Because _insert_in_metadata_fits_safe is written as a method
# we need to create a dummy CCDData instance to hold the FITS
# header we are constructing. This probably indicates that
# _insert_in_metadata_fits_safe should be rewritten in a more
# sensible way...
dummy_ccd = CCDData([1], meta=fits.Header(), unit="adu")
for k, v in self.header.items():
dummy_ccd._insert_in_metadata_fits_safe(k, v)
header = dummy_ccd.header
if self.unit is not u.dimensionless_unscaled:
header['bunit'] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
if as_image_hdu:
hdus = [fits.ImageHDU(self.data, header)]
else:
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, 'shape'):
raise ValueError('only a numpy.ndarray mask can be saved.')
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
            # We need to save some kind of information about which uncertainty
            # class was used so that loading the HDUList can infer the
            # uncertainty type. The class name is stored in the uncertainty
            # HDU header under ``key_uncertainty_type``; only the classes in
            # ``_known_uncertainties`` are supported.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError('only uncertainties of type {} can be saved.'
.format(_known_uncertainties))
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
# Assuming uncertainty is an StdDevUncertainty save just the array
# this might be problematic if the Uncertainty has a unit differing
# from the data so abort for different units. This is important for
# astropy > 1.2
if (hasattr(self.uncertainty, 'unit') and
self.uncertainty.unit is not None):
if not _uncertainty_unit_equivalent_to_parent(
uncertainty_cls, self.uncertainty.unit, self.unit):
raise ValueError(
'saving uncertainties with a unit that is not '
'equivalent to the unit from the data unit is not '
'supported.')
hduUncert = fits.ImageHDU(self.uncertainty.array, hdr_uncertainty,
name=hdu_uncertainty)
hdus.append(hduUncert)
if hdu_flags and self.flags:
raise NotImplementedError('adding the flags to a HDU is not '
'supported at this time.')
hdulist = fits.HDUList(hdus)
return hdulist
def copy(self):
"""
Return a copy of the CCDData object.
"""
return self.__class__(self, copy=True)
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)
def _insert_in_metadata_fits_safe(self, key, value):
"""
Insert key/value pair into metadata in a way that FITS can serialize.
Parameters
----------
key : str
Key to be inserted in dictionary.
value : str or None
Value to be inserted.
Notes
-----
This addresses a shortcoming of the FITS standard. There are length
restrictions on both the ``key`` (8 characters) and ``value`` (72
characters) in the FITS standard. There is a convention for handling
long keywords and a convention for handling long values, but the
two conventions cannot be used at the same time.
This addresses that case by checking the length of the ``key`` and
``value`` and, if necessary, shortening the key.
"""
if len(key) > 8 and len(value) > 72:
short_name = key[:8]
self.meta[f'HIERARCH {key.upper()}'] = (
short_name, f"Shortened name for {key}")
self.meta[short_name] = value
else:
self.meta[key] = value
    # A dictionary mapping "known" invalid FITS unit strings to valid units.
known_invalid_fits_unit_strings = {'ELECTRONS/S': u.electron/u.s,
'ELECTRONS': u.electron,
'electrons': u.electron}
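# Illustrative sketch (not part of the original module): basic CCDData
# construction and conversion to an HDUList via `to_hdu`. The data values and
# metadata are arbitrary placeholders.
def _example_ccddata_to_hdu():
    data = np.ones((4, 4))
    ccd = CCDData(data, unit='adu', meta={'exposure': 30.0},
                  mask=np.zeros((4, 4), dtype=bool))
    hdulist = ccd.to_hdu()
    # hdulist[0] holds the data (with BUNIT set), hdulist['MASK'] the uint8 mask.
    return hdulist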
# These need to be importable by the tests...
_KEEP_THESE_KEYWORDS_IN_HEADER = [
'JD-OBS',
'MJD-OBS',
'DATE-OBS'
]
_PCs = {'PC1_1', 'PC1_2', 'PC2_1', 'PC2_2'}
_CDs = {'CD1_1', 'CD1_2', 'CD2_1', 'CD2_2'}
def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs
"""
# Try constructing a WCS object.
try:
wcs = WCS(hdr)
except Exception as exc:
# Normally WCS only raises Warnings and doesn't fail but in rare
# cases (malformed header) it could fail...
log.info('An exception happened while extracting WCS information from '
'the Header.\n{}: {}'.format(type(exc).__name__, str(exc)))
return hdr, None
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
# Check that this does not result in an inconsistent header WCS if the WCS
# is converted back to a header.
if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
# The PCi_j representation is used by the astropy.wcs object,
# so CDi_j keywords were not removed from new_hdr. Remove them now.
for cd in _CDs:
new_hdr.remove(cd, ignore_missing=True)
# The other case -- CD in the header produced by astropy.wcs -- should
# never happen based on [1], which computes the matrix in PC form.
# [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
#
# The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
# check for the possibility that both PC and CD are present in the result
# so if the implementation of to_header changes in wcslib in the future
# then the tests should catch it, and then this code will need to be
# updated.
# We need to check for any SIP coefficients that got left behind if the
# header has SIP.
if wcs.sip is not None:
keyword = '{}_{}_{}'
polynomials = ['A', 'B', 'AP', 'BP']
for poly in polynomials:
order = wcs.sip.__getattribute__(f'{poly.lower()}_order')
for i, j in itertools.product(range(order), repeat=2):
new_hdr.remove(keyword.format(poly, i, j),
ignore_missing=True)
return (new_hdr, wcs)
def fits_ccddata_reader(filename, hdu=0, unit=None, hdu_uncertainty='UNCERT',
hdu_mask='MASK', hdu_flags=None,
key_uncertainty_type='UTYPE', **kwd):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, str, tuple of (str, int), optional
Index or other identifier of the Header Data Unit of the FITS
        file from which CCDData should be initialized. If zero and there is
        no data in the primary HDU, the first extension HDU with image data
        is used instead, and keywords from the primary header that are not
        already present are merged into the extension header.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
Units of the image data. If this argument is provided and there is a
unit for the image in the FITS header (the keyword ``BUNIT`` is used
as the unit, if present), this argument is used for the unit.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default is ``UTYPE``.
.. versionadded:: 3.1
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
    FITS files that contain scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
"""
unsupport_open_keywords = {
'do_not_scale_image_data': 'Image data must be scaled.',
'scale_back': 'Scale information is not preserved.'
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = f'unsupported keyword: {key}.'
raise TypeError(' '.join([prefix, msg]))
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
unc_hdu = hdus[hdu_uncertainty]
stored_unc_name = unc_hdu.header.get(key_uncertainty_type, 'None')
# For compatibility reasons the default is standard deviation
# uncertainty because files could have been created before the
# uncertainty type was stored in the header.
unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
uncertainty = unc_type(unc_hdu.data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError('loading flags is currently not '
'supported.')
# search for the first instance with data if
# the primary header is empty.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if (hdus.info(hdu)[i][3] == 'ImageHDU' and
hdus.fileinfo(i)['datSpan'] > 0):
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info(f"first HDU with data is extension {hdu}.")
break
if 'bunit' in hdr:
fits_unit_string = hdr['bunit']
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == 'adu':
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if fits_unit_string:
if unit is None:
# Convert the BUNIT header keyword to a unit and if that's not
# possible raise a meaningful error message.
try:
kifus = CCDData.known_invalid_fits_unit_strings
if fits_unit_string in kifus:
fits_unit_string = kifus[fits_unit_string]
fits_unit_string = u.Unit(fits_unit_string)
except ValueError:
raise ValueError(
'The Header value for the key BUNIT ({}) cannot be '
'interpreted as valid unit. To successfully read the '
'file as CCDData you can pass in a valid `unit` '
'argument explicitly or change the header of the FITS '
'file before reading it.'
.format(fits_unit_string))
else:
log.info("using the unit {} passed to the FITS reader instead "
"of the unit {} in the FITS file."
.format(unit, fits_unit_string))
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(hdus[hdu].data, meta=hdr, unit=use_unit,
mask=mask, uncertainty=uncertainty, wcs=wcs)
return ccd_data
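# Illustrative sketch (not part of the original module); the file name is a
# placeholder. It shows a typical reader call with an explicit ``unit`` for
# files lacking a usable BUNIT keyword. Equivalently, ``CCDData.read`` uses
# this function through the registry entries at the end of this module.
def _example_read_ccd(filename='image.fits'):
    return fits_ccddata_reader(filename, hdu=0, unit='adu')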
def fits_ccddata_writer(
ccd_data, filename, hdu_mask='MASK', hdu_uncertainty='UNCERT',
hdu_flags=None, key_uncertainty_type='UTYPE', as_image_hdu=False,
**kwd):
"""
Write CCDData object to FITS file.
Parameters
----------
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags : str or None, optional
        If it is a string, append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty and
``None`` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead of
the default `~astropy.io.fits.PrimaryHDU`.
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
        - If ``self.uncertainty`` is set but has a different unit than
          ``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = ccd_data.to_hdu(
hdu_mask=hdu_mask, hdu_uncertainty=hdu_uncertainty,
key_uncertainty_type=key_uncertainty_type, hdu_flags=hdu_flags,
as_image_hdu=as_image_hdu)
if as_image_hdu:
hdu.insert(0, fits.PrimaryHDU())
hdu.writeto(filename, **kwd)
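# Illustrative sketch (not part of the original module); the file name is a
# placeholder. It shows a write/read round trip with the two functions defined
# above (also reachable as ``CCDData.write`` / ``CCDData.read`` through the
# registry entries registered below).
def _example_ccd_round_trip(filename='roundtrip.fits'):
    ccd = CCDData(np.zeros((2, 2)), unit='adu')
    fits_ccddata_writer(ccd, filename, overwrite=True)
    return fits_ccddata_reader(filename)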
with registry.delay_doc_updates(CCDData):
registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer('fits', CCDData, fits_ccddata_writer)
registry.register_identifier('fits', CCDData, fits.connect.is_fits)
|