# Licensed under a 3-clause BSD style license - see PYFITS.rst

import ctypes
import gc
import itertools
import math
import re
import time
import warnings
from contextlib import suppress

import numpy as np

from astropy.io.fits import conf
from astropy.io.fits._tiled_compression import compress_hdu, decompress_hdu
from astropy.io.fits.card import Card
from astropy.io.fits.column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES
from astropy.io.fits.column import TDEF_RE, ColDefs, Column
from astropy.io.fits.fitsrec import FITS_rec
from astropy.io.fits.header import Header
from astropy.io.fits.util import (
    _get_array_mmap,
    _is_int,
    _is_pseudo_integer,
    _pseudo_zero,
)
from astropy.utils import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning

from .base import BITPIX2DTYPE, DELAYED, DTYPE2BITPIX, ExtensionHDU
from .image import ImageHDU
from .table import BinTableHDU

# This global variable is used e.g., when calling fits.open with
# disable_image_compression which temporarily changes the global variable to
# False. This should ideally be refactored to avoid relying on global module
# variables.
COMPRESSION_ENABLED = True

# Quantization dithering method constants; these are right out of fitsio.h
NO_DITHER = -1
SUBTRACTIVE_DITHER_1 = 1
SUBTRACTIVE_DITHER_2 = 2
QUANTIZE_METHOD_NAMES = {
    NO_DITHER: "NO_DITHER",
    SUBTRACTIVE_DITHER_1: "SUBTRACTIVE_DITHER_1",
    SUBTRACTIVE_DITHER_2: "SUBTRACTIVE_DITHER_2",
}
DITHER_SEED_CLOCK = 0
DITHER_SEED_CHECKSUM = -1

COMPRESSION_TYPES = ("RICE_1", "GZIP_1", "GZIP_2", "PLIO_1", "HCOMPRESS_1")

# Default compression parameter values
DEFAULT_COMPRESSION_TYPE = "RICE_1"
DEFAULT_QUANTIZE_LEVEL = 16.0
DEFAULT_QUANTIZE_METHOD = NO_DITHER
DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK
DEFAULT_HCOMP_SCALE = 0
DEFAULT_HCOMP_SMOOTH = 0
DEFAULT_BLOCK_SIZE = 32
DEFAULT_BYTE_PIX = 4

CMTYPE_ALIASES = {"RICE_ONE": "RICE_1"}

COMPRESSION_KEYWORDS = {
    "ZIMAGE",
    "ZCMPTYPE",
    "ZBITPIX",
    "ZNAXIS",
    "ZMASKCMP",
    "ZSIMPLE",
    "ZTENSION",
    "ZEXTEND",
}


class CompImageHeader(Header):
    """
    Header object for compressed image HDUs designed to keep the compression
    header and the underlying image header properly synchronized.

    This essentially wraps the image header, so that all values are read from
    and written to the image header.  However, updates to the image header
    will also update the table header where appropriate.

    Note that if no image header is passed in, the code will instantiate a
    regular `~astropy.io.fits.Header`.
    """

    # TODO: The difficulty of implementing this screams a need to rewrite this
    # module

    _keyword_remaps = {
        "SIMPLE": "ZSIMPLE",
        "XTENSION": "ZTENSION",
        "BITPIX": "ZBITPIX",
        "NAXIS": "ZNAXIS",
        "EXTEND": "ZEXTEND",
        "BLOCKED": "ZBLOCKED",
        "PCOUNT": "ZPCOUNT",
        "GCOUNT": "ZGCOUNT",
        "CHECKSUM": "ZHECKSUM",
        "DATASUM": "ZDATASUM",
    }

    _zdef_re = re.compile(r"(?P<label>^[Zz][a-zA-Z]*)(?P<num>[1-9][0-9 ]*$)?")
    _compression_keywords = set(_keyword_remaps.values()).union(
        ["ZIMAGE", "ZCMPTYPE", "ZMASKCMP", "ZQUANTIZ", "ZDITHER0"]
    )
    _indexed_compression_keywords = {"ZNAXIS", "ZTILE", "ZNAME", "ZVAL"}

    # TODO: Once in place it should be possible to manage some of this through
    # the schema system, but it's not quite ready for that yet.  Also it still
    # makes more sense to change CompImageHDU to subclass ImageHDU :/

    def __new__(cls, table_header, image_header=None):
        # 2019-09-14 (MHvK): No point wrapping anything if no image_header is
        # given.
This happens if __getitem__ and copy are called - our super # class will aim to initialize a new, possibly partially filled # header, but we cannot usefully deal with that. # TODO: the above suggests strongly we should *not* subclass from # Header. See also comment above about the need for reorganization. if image_header is None: return Header(table_header) else: return super().__new__(cls) def __init__(self, table_header, image_header): self._cards = image_header._cards self._keyword_indices = image_header._keyword_indices self._rvkc_indices = image_header._rvkc_indices self._modified = image_header._modified self._table_header = table_header # We need to override and Header methods that can modify the header, and # ensure that they sync with the underlying _table_header def __setitem__(self, key, value): # This isn't pretty, but if the `key` is either an int or a tuple we # need to figure out what keyword name that maps to before doing # anything else; these checks will be repeated later in the # super().__setitem__ call but I don't see another way around it # without some major refactoring if self._set_slice(key, value, self): return if isinstance(key, int): keyword, index = self._keyword_from_index(key) elif isinstance(key, tuple): keyword, index = key else: # We don't want to specify and index otherwise, because that will # break the behavior for new keywords and for commentary keywords keyword, index = key, None if self._is_reserved_keyword(keyword): return super().__setitem__(key, value) if index is not None: remapped_keyword = self._remap_keyword(keyword) self._table_header[remapped_keyword, index] = value # Else this will pass through to ._update def __delitem__(self, key): if isinstance(key, slice) or self._haswildcard(key): # If given a slice pass that on to the superclass and bail out # early; we only want to make updates to _table_header when given # a key specifying a single keyword return super().__delitem__(key) if isinstance(key, int): keyword, index = self._keyword_from_index(key) elif isinstance(key, tuple): keyword, index = key else: keyword, index = key, None if key not in self: raise KeyError(f"Keyword {key!r} not found.") super().__delitem__(key) remapped_keyword = self._remap_keyword(keyword) if remapped_keyword in self._table_header: if index is not None: del self._table_header[(remapped_keyword, index)] else: del self._table_header[remapped_keyword] def append(self, card=None, useblanks=True, bottom=False, end=False): # This logic unfortunately needs to be duplicated from the base class # in order to determine the keyword if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif card is None: card = Card() elif not isinstance(card, Card): raise ValueError( "The value appended to a Header must be either a keyword or " "(keyword, value, [comment]) tuple; got: {!r}".format(card) ) if self._is_reserved_keyword(card.keyword): return super().append(card=card, useblanks=useblanks, bottom=bottom, end=end) remapped_keyword = self._remap_keyword(card.keyword) # card.keyword strips the HIERARCH if present so this must be added # back to avoid a warning. 
if str(card).startswith("HIERARCH ") and not remapped_keyword.startswith( "HIERARCH " ): remapped_keyword = "HIERARCH " + remapped_keyword card = Card(remapped_keyword, card.value, card.comment) # Here we disable the use of blank cards, because the call above to # Header.append may have already deleted a blank card in the table # header, thanks to inheritance: Header.append calls 'del self[-1]' # to delete a blank card, which calls CompImageHeader.__deltitem__, # which deletes the blank card both in the image and the table headers! self._table_header.append(card=card, useblanks=False, bottom=bottom, end=end) def insert(self, key, card, useblanks=True, after=False): if isinstance(key, int): # Determine condition to pass through to append if after: if key == -1: key = len(self._cards) else: key += 1 if key >= len(self._cards): self.append(card, end=True) return if isinstance(card, str): card = Card(card) elif isinstance(card, tuple): card = Card(*card) elif not isinstance(card, Card): raise ValueError( "The value inserted into a Header must be either a keyword or " "(keyword, value, [comment]) tuple; got: {!r}".format(card) ) if self._is_reserved_keyword(card.keyword): return # Now the tricky part is to determine where to insert in the table # header. If given a numerical index we need to map that to the # corresponding index in the table header. Although rare, there may be # cases where there is no mapping in which case we just try the same # index # NOTE: It is crucial that remapped_index in particular is figured out # before the image header is modified remapped_index = self._remap_index(key) remapped_keyword = self._remap_keyword(card.keyword) super().insert(key, card, useblanks=useblanks, after=after) card = Card(remapped_keyword, card.value, card.comment) # Here we disable the use of blank cards, because the call above to # Header.insert may have already deleted a blank card in the table # header, thanks to inheritance: Header.insert calls 'del self[-1]' # to delete a blank card, which calls CompImageHeader.__delitem__, # which deletes the blank card both in the image and the table headers! 
self._table_header.insert(remapped_index, card, useblanks=False, after=after) def _update(self, card): keyword = card[0] if self._is_reserved_keyword(keyword): return super()._update(card) if keyword in Card._commentary_keywords: # Otherwise this will result in a duplicate insertion return remapped_keyword = self._remap_keyword(keyword) self._table_header._update((remapped_keyword,) + card[1:]) # Last piece needed (I think) for synchronizing with the real header # This one is tricky since _relativeinsert calls insert def _relativeinsert(self, card, before=None, after=None, replace=False): keyword = card[0] if self._is_reserved_keyword(keyword): return # Now we have to figure out how to remap 'before' and 'after' if before is None: if isinstance(after, int): remapped_after = self._remap_index(after) else: remapped_after = self._remap_keyword(after) remapped_before = None else: if isinstance(before, int): remapped_before = self._remap_index(before) else: remapped_before = self._remap_keyword(before) remapped_after = None super()._relativeinsert(card, before=before, after=after, replace=replace) remapped_keyword = self._remap_keyword(keyword) card = Card(remapped_keyword, card[1], card[2]) self._table_header._relativeinsert( card, before=remapped_before, after=remapped_after, replace=replace ) @classmethod def _is_reserved_keyword(cls, keyword, warn=True): msg = ( "Keyword {!r} is reserved for use by the FITS Tiled Image " "Convention and will not be stored in the header for the " "image being compressed.".format(keyword) ) if keyword == "TFIELDS": if warn: warnings.warn(msg) return True m = TDEF_RE.match(keyword) if m and m.group("label").upper() in TABLE_KEYWORD_NAMES: if warn: warnings.warn(msg) return True m = cls._zdef_re.match(keyword) if m: label = m.group("label").upper() num = m.group("num") if num is not None and label in cls._indexed_compression_keywords: if warn: warnings.warn(msg) return True elif label in cls._compression_keywords: if warn: warnings.warn(msg) return True return False @classmethod def _remap_keyword(cls, keyword): # Given a keyword that one might set on an image, remap that keyword to # the name used for it in the COMPRESSED HDU header # This is mostly just a lookup in _keyword_remaps, but needs handling # for NAXISn keywords is_naxisn = False if keyword[:5] == "NAXIS": with suppress(ValueError): index = int(keyword[5:]) is_naxisn = index > 0 if is_naxisn: return f"ZNAXIS{index}" # If the keyword does not need to be remapped then just return the # original keyword return cls._keyword_remaps.get(keyword, keyword) def _remap_index(self, idx): # Given an integer index into this header, map that to the index in the # table header for the same card. If the card doesn't exist in the # table header (generally should *not* be the case) this will just # return the same index # This *does* also accept a keyword or (keyword, repeat) tuple and # obtains the associated numerical index with self._cardindex if not isinstance(idx, int): idx = self._cardindex(idx) keyword, repeat = self._keyword_from_index(idx) remapped_insert_keyword = self._remap_keyword(keyword) with suppress(IndexError, KeyError): idx = self._table_header._cardindex((remapped_insert_keyword, repeat)) return idx def clear(self): """ Remove all cards from the header. """ self._table_header.clear() super().clear() # TODO: Fix this class so that it doesn't actually inherit from BinTableHDU, # but instead has an internal BinTableHDU reference class CompImageHDU(BinTableHDU): """ Compressed Image HDU class. 
""" _manages_own_heap = True """ The calls to CFITSIO lay out the heap data in memory, and we write it out the same way CFITSIO organizes it. In principle this would break if a user manually changes the underlying compressed data by hand, but there is no reason they would want to do that (and if they do that's their responsibility). """ _default_name = "COMPRESSED_IMAGE" def __init__( self, data=None, header=None, name=None, compression_type=DEFAULT_COMPRESSION_TYPE, tile_size=None, hcomp_scale=DEFAULT_HCOMP_SCALE, hcomp_smooth=DEFAULT_HCOMP_SMOOTH, quantize_level=DEFAULT_QUANTIZE_LEVEL, quantize_method=DEFAULT_QUANTIZE_METHOD, dither_seed=DEFAULT_DITHER_SEED, do_not_scale_image_data=False, uint=False, scale_back=False, **kwargs, ): """ Parameters ---------- data : array, optional Uncompressed image data header : `~astropy.io.fits.Header`, optional Header to be associated with the image; when reading the HDU from a file (data=DELAYED), the header read from the file name : str, optional The ``EXTNAME`` value; if this value is `None`, then the name from the input image header will be used; if there is no name in the input image header then the default name ``COMPRESSED_IMAGE`` is used. compression_type : str, optional Compression algorithm: one of ``'RICE_1'``, ``'RICE_ONE'``, ``'PLIO_1'``, ``'GZIP_1'``, ``'GZIP_2'``, ``'HCOMPRESS_1'`` tile_size : int, optional Compression tile sizes. Default treats each row of image as a tile. hcomp_scale : float, optional HCOMPRESS scale parameter hcomp_smooth : float, optional HCOMPRESS smooth parameter quantize_level : float, optional Floating point quantization level; see note below quantize_method : int, optional Floating point quantization dithering method; can be either ``NO_DITHER`` (-1; default), ``SUBTRACTIVE_DITHER_1`` (1), or ``SUBTRACTIVE_DITHER_2`` (2); see note below dither_seed : int, optional Random seed to use for dithering; can be either an integer in the range 1 to 1000 (inclusive), ``DITHER_SEED_CLOCK`` (0; default), or ``DITHER_SEED_CHECKSUM`` (-1); see note below Notes ----- The astropy.io.fits package supports 2 methods of image compression: 1) The entire FITS file may be externally compressed with the gzip or pkzip utility programs, producing a ``*.gz`` or ``*.zip`` file, respectively. When reading compressed files of this type, Astropy first uncompresses the entire file into a temporary file before performing the requested read operations. The astropy.io.fits package does not support writing to these types of compressed files. This type of compression is supported in the ``_File`` class, not in the `CompImageHDU` class. The file compression type is recognized by the ``.gz`` or ``.zip`` file name extension. 2) The `CompImageHDU` class supports the FITS tiled image compression convention in which the image is subdivided into a grid of rectangular tiles, and each tile of pixels is individually compressed. The details of this FITS compression convention are described at the `FITS Support Office web site <https://fits.gsfc.nasa.gov/registry/tilecompression.html>`_. Basically, the compressed image tiles are stored in rows of a variable length array column in a FITS binary table. The astropy.io.fits recognizes that this binary table extension contains an image and treats it as if it were an image extension. Under this tile-compression format, FITS header keywords remain uncompressed. At this time, Astropy does not support the ability to extract and uncompress sections of the image without having to uncompress the entire image. 
The astropy.io.fits package supports 3 general-purpose compression algorithms plus one other special-purpose compression technique that is designed for data masks with positive integer pixel values. The 3 general purpose algorithms are GZIP, Rice, and HCOMPRESS, and the special-purpose technique is the IRAF pixel list compression technique (PLIO). The ``compression_type`` parameter defines the compression algorithm to be used. The FITS image can be subdivided into any desired rectangular grid of compression tiles. With the GZIP, Rice, and PLIO algorithms, the default is to take each row of the image as a tile. The HCOMPRESS algorithm is inherently 2-dimensional in nature, so the default in this case is to take 16 rows of the image per tile. In most cases, it makes little difference what tiling pattern is used, so the default tiles are usually adequate. In the case of very small images, it could be more efficient to compress the whole image as a single tile. Note that the image dimensions are not required to be an integer multiple of the tile dimensions; if not, then the tiles at the edges of the image will be smaller than the other tiles. The ``tile_size`` parameter may be provided as a list of tile sizes, one for each dimension in the image. For example a ``tile_size`` value of ``[100,100]`` would divide a 300 X 300 image into 9 100 X 100 tiles. The 4 supported image compression algorithms are all 'lossless' when applied to integer FITS images; the pixel values are preserved exactly with no loss of information during the compression and uncompression process. In addition, the HCOMPRESS algorithm supports a 'lossy' compression mode that will produce larger amount of image compression. This is achieved by specifying a non-zero value for the ``hcomp_scale`` parameter. Since the amount of compression that is achieved depends directly on the RMS noise in the image, it is usually more convenient to specify the ``hcomp_scale`` factor relative to the RMS noise. Setting ``hcomp_scale = 2.5`` means use a scale factor that is 2.5 times the calculated RMS noise in the image tile. In some cases it may be desirable to specify the exact scaling to be used, instead of specifying it relative to the calculated noise value. This may be done by specifying the negative of the desired scale value (typically in the range -2 to -100). Very high compression factors (of 100 or more) can be achieved by using large ``hcomp_scale`` values, however, this can produce undesirable 'blocky' artifacts in the compressed image. A variation of the HCOMPRESS algorithm (called HSCOMPRESS) can be used in this case to apply a small amount of smoothing of the image when it is uncompressed to help cover up these artifacts. This smoothing is purely cosmetic and does not cause any significant change to the image pixel values. Setting the ``hcomp_smooth`` parameter to 1 will engage the smoothing algorithm. Floating point FITS images (which have ``BITPIX`` = -32 or -64) usually contain too much 'noise' in the least significant bits of the mantissa of the pixel values to be effectively compressed with any lossless algorithm. Consequently, floating point images are first quantized into scaled integer pixel values (and thus throwing away much of the noise) before being compressed with the specified algorithm (either GZIP, RICE, or HCOMPRESS). 
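        For instance, a 300 X 300 floating point image could be compressed
        lossily with HCOMPRESS using the parameters described above (the
        values here are purely illustrative)::

            import numpy as np
            from astropy.io import fits

            img = np.random.normal(size=(300, 300)).astype(np.float32)

            # 100 X 100 tiles, a scale factor of 2.5 times the per-tile RMS
            # noise, and the cosmetic smoothing option enabled
            hdu = fits.CompImageHDU(
                data=img,
                compression_type='HCOMPRESS_1',
                tile_size=[100, 100],
                hcomp_scale=2.5,
                hcomp_smooth=1,
            )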
This technique produces much higher compression factors than simply using the GZIP utility to externally compress the whole FITS file, but it also means that the original floating point value pixel values are not exactly preserved. When done properly, this integer scaling technique will only discard the insignificant noise while still preserving all the real information in the image. The amount of precision that is retained in the pixel values is controlled by the ``quantize_level`` parameter. Larger values will result in compressed images whose pixels more closely match the floating point pixel values, but at the same time the amount of compression that is achieved will be reduced. Users should experiment with different values for this parameter to determine the optimal value that preserves all the useful information in the image, without needlessly preserving all the 'noise' which will hurt the compression efficiency. The default value for the ``quantize_level`` scale factor is 16, which means that scaled integer pixel values will be quantized such that the difference between adjacent integer values will be 1/16th of the noise level in the image background. An optimized algorithm is used to accurately estimate the noise in the image. As an example, if the RMS noise in the background pixels of an image = 32.0, then the spacing between adjacent scaled integer pixel values will equal 2.0 by default. Note that the RMS noise is independently calculated for each tile of the image, so the resulting integer scaling factor may fluctuate slightly for each tile. In some cases, it may be desirable to specify the exact quantization level to be used, instead of specifying it relative to the calculated noise value. This may be done by specifying the negative of desired quantization level for the value of ``quantize_level``. In the previous example, one could specify ``quantize_level = -2.0`` so that the quantized integer levels differ by 2.0. Larger negative values for ``quantize_level`` means that the levels are more coarsely-spaced, and will produce higher compression factors. The quantization algorithm can also apply one of two random dithering methods in order to reduce bias in the measured intensity of background regions. The default method, specified with the constant ``SUBTRACTIVE_DITHER_1`` adds dithering to the zero-point of the quantization array itself rather than adding noise to the actual image. The random noise is added on a pixel-by-pixel basis, so in order restore each pixel from its integer value to its floating point value it is necessary to replay the same sequence of random numbers for each pixel (see below). The other method, ``SUBTRACTIVE_DITHER_2``, is exactly like the first except that before dithering any pixel with a floating point value of ``0.0`` is replaced with the special integer value ``-2147483647``. When the image is uncompressed, pixels with this value are restored back to ``0.0`` exactly. Finally, a value of ``NO_DITHER`` disables dithering entirely. As mentioned above, when using the subtractive dithering algorithm it is necessary to be able to generate a (pseudo-)random sequence of noise for each pixel, and replay that same sequence upon decompressing. To facilitate this, a random seed between 1 and 10000 (inclusive) is used to seed a random number generator, and that seed is stored in the ``ZDITHER0`` keyword in the header of the compressed HDU. 
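        As a sketch of these options (the numeric values stand in for the
        named constants described above), a fixed quantization step with
        subtractive dithering and a checksum-based seed could be requested
        as::

            import numpy as np
            from astropy.io import fits

            img = np.random.normal(size=(256, 256)).astype(np.float32)

            hdu = fits.CompImageHDU(
                data=img,
                compression_type='RICE_1',
                quantize_level=-0.1,  # negative: levels differ by exactly 0.1
                quantize_method=2,    # SUBTRACTIVE_DITHER_2
                dither_seed=-1,       # DITHER_SEED_CHECKSUM
            )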
In order to use that seed to generate the same sequence of random numbers the same random number generator must be used at compression and decompression time; for that reason the tiled image convention provides an implementation of a very simple pseudo-random number generator. The seed itself can be provided in one of three ways, controllable by the ``dither_seed`` argument: It may be specified manually, or it may be generated arbitrarily based on the system's clock (``DITHER_SEED_CLOCK``) or based on a checksum of the pixels in the image's first tile (``DITHER_SEED_CHECKSUM``). The clock-based method is the default, and is sufficient to ensure that the value is reasonably "arbitrary" and that the same seed is unlikely to be generated sequentially. The checksum method, on the other hand, ensures that the same seed is used every time for a specific image. This is particularly useful for software testing as it ensures that the same image will always use the same seed. """ compression_type = CMTYPE_ALIASES.get(compression_type, compression_type) if data is DELAYED: # Reading the HDU from a file super().__init__(data=data, header=header) else: # Create at least a skeleton HDU that matches the input # header and data (if any were input) super().__init__(data=None, header=header) # Store the input image data self.data = data # Update the table header (_header) to the compressed # image format and to match the input data (if any); # Create the image header (_image_header) from the input # image header (if any) and ensure it matches the input # data; Create the initially empty table data array to # hold the compressed data. self._update_header_data( header, name, compression_type=compression_type, tile_size=tile_size, hcomp_scale=hcomp_scale, hcomp_smooth=hcomp_smooth, quantize_level=quantize_level, quantize_method=quantize_method, dither_seed=dither_seed, ) # TODO: A lot of this should be passed on to an internal image HDU o # something like that, see ticket #88 self._do_not_scale_image_data = do_not_scale_image_data self._uint = uint self._scale_back = scale_back self._axes = [ self._header.get("ZNAXIS" + str(axis + 1), 0) for axis in range(self._header.get("ZNAXIS", 0)) ] # store any scale factors from the table header if do_not_scale_image_data: self._bzero = 0 self._bscale = 1 else: self._bzero = self._header.get("BZERO", 0) self._bscale = self._header.get("BSCALE", 1) self._bitpix = self._header["ZBITPIX"] self._orig_bzero = self._bzero self._orig_bscale = self._bscale self._orig_bitpix = self._bitpix def _remove_unnecessary_default_extnames(self, header): """Remove default EXTNAME values if they are unnecessary. Some data files (eg from CFHT) can have the default EXTNAME and an explicit value. This method removes the default if a more specific header exists. It also removes any duplicate default values. """ if "EXTNAME" in header: indices = header._keyword_indices["EXTNAME"] # Only continue if there is more than one found n_extname = len(indices) if n_extname > 1: extnames_to_remove = [ index for index in indices if header[index] == self._default_name ] if len(extnames_to_remove) == n_extname: # Keep the first (they are all the same) extnames_to_remove.pop(0) # Remove them all in reverse order to keep the index unchanged. 
for index in reversed(sorted(extnames_to_remove)): del header[index] @property def name(self): # Convert the value to a string to be flexible in some pathological # cases (see ticket #96) # Similar to base class but uses .header rather than ._header return str(self.header.get("EXTNAME", self._default_name)) @name.setter def name(self, value): # This is a copy of the base class but using .header instead # of ._header to ensure that the name stays in sync. if not isinstance(value, str): raise TypeError("'name' attribute must be a string") if not conf.extension_name_case_sensitive: value = value.upper() if "EXTNAME" in self.header: self.header["EXTNAME"] = value else: self.header["EXTNAME"] = (value, "extension name") @classmethod def match_header(cls, header): card = header.cards[0] if card.keyword != "XTENSION": return False xtension = card.value if isinstance(xtension, str): xtension = xtension.rstrip() if xtension not in ("BINTABLE", "A3DTABLE"): return False if "ZIMAGE" not in header or not header["ZIMAGE"]: return False return COMPRESSION_ENABLED def _update_header_data( self, image_header, name=None, compression_type=None, tile_size=None, hcomp_scale=None, hcomp_smooth=None, quantize_level=None, quantize_method=None, dither_seed=None, ): """ Update the table header (`_header`) to the compressed image format and to match the input data (if any). Create the image header (`_image_header`) from the input image header (if any) and ensure it matches the input data. Create the initially-empty table data array to hold the compressed data. This method is mainly called internally, but a user may wish to call this method after assigning new data to the `CompImageHDU` object that is of a different type. Parameters ---------- image_header : `~astropy.io.fits.Header` header to be associated with the image name : str, optional the ``EXTNAME`` value; if this value is `None`, then the name from the input image header will be used; if there is no name in the input image header then the default name 'COMPRESSED_IMAGE' is used compression_type : str, optional compression algorithm 'RICE_1', 'PLIO_1', 'GZIP_1', 'GZIP_2', 'HCOMPRESS_1'; if this value is `None`, use value already in the header; if no value already in the header, use 'RICE_1' tile_size : sequence of int, optional compression tile sizes as a list; if this value is `None`, use value already in the header; if no value already in the header, treat each row of image as a tile hcomp_scale : float, optional HCOMPRESS scale parameter; if this value is `None`, use the value already in the header; if no value already in the header, use 1 hcomp_smooth : float, optional HCOMPRESS smooth parameter; if this value is `None`, use the value already in the header; if no value already in the header, use 0 quantize_level : float, optional floating point quantization level; if this value is `None`, use the value already in the header; if no value already in header, use 16 quantize_method : int, optional floating point quantization dithering method; can be either NO_DITHER (-1), SUBTRACTIVE_DITHER_1 (1; default), or SUBTRACTIVE_DITHER_2 (2) dither_seed : int, optional random seed to use for dithering; can be either an integer in the range 1 to 1000 (inclusive), DITHER_SEED_CLOCK (0; default), or DITHER_SEED_CHECKSUM (-1) """ # Clean up EXTNAME duplicates self._remove_unnecessary_default_extnames(self._header) image_hdu = ImageHDU(data=self.data, header=self._header) self._image_header = CompImageHeader(self._header, image_hdu.header) self._axes = image_hdu._axes 
del image_hdu # Determine based on the size of the input data whether to use the Q # column format to store compressed data or the P format. # The Q format is used only if the uncompressed data is larger than # 4 GB. This is not a perfect heuristic, as one can contrive an input # array which, when compressed, the entire binary table representing # the compressed data is larger than 4GB. That said, this is the same # heuristic used by CFITSIO, so this should give consistent results. # And the cases where this heuristic is insufficient are extreme and # almost entirely contrived corner cases, so it will do for now if self._has_data: huge_hdu = self.data.nbytes > 2**32 else: huge_hdu = False # Update the extension name in the table header if not name and "EXTNAME" not in self._header: # Do not sync this with the image header since the default # name is specific to the table header. self._header.set( "EXTNAME", self._default_name, "name of this binary table extension", after="TFIELDS", ) elif name: # Force the name into table and image headers. self.name = name # Set the compression type in the table header. if compression_type: if compression_type not in COMPRESSION_TYPES: warnings.warn( "Unknown compression type provided (supported are {}). " "Default ({}) compression will be used.".format( ", ".join(map(repr, COMPRESSION_TYPES)), DEFAULT_COMPRESSION_TYPE, ), AstropyUserWarning, ) compression_type = DEFAULT_COMPRESSION_TYPE self._header.set( "ZCMPTYPE", compression_type, "compression algorithm", after="TFIELDS" ) else: compression_type = self._header.get("ZCMPTYPE", DEFAULT_COMPRESSION_TYPE) compression_type = CMTYPE_ALIASES.get(compression_type, compression_type) # If the input image header had BSCALE/BZERO cards, then insert # them in the table header. if image_header: bzero = image_header.get("BZERO", 0.0) bscale = image_header.get("BSCALE", 1.0) after_keyword = "EXTNAME" if bscale != 1.0: self._header.set("BSCALE", bscale, after=after_keyword) after_keyword = "BSCALE" if bzero != 0.0: self._header.set("BZERO", bzero, after=after_keyword) try: bitpix_comment = image_header.comments["BITPIX"] except (AttributeError, KeyError): bitpix_comment = "data type of original image" try: naxis_comment = image_header.comments["NAXIS"] except (AttributeError, KeyError): naxis_comment = "dimension of original image" # Set the label for the first column in the table self._header.set( "TTYPE1", "COMPRESSED_DATA", "label for field 1", after="TFIELDS" ) # Set the data format for the first column. It is dependent # on the requested compression type. if compression_type == "PLIO_1": tform1 = "1QI" if huge_hdu else "1PI" else: tform1 = "1QB" if huge_hdu else "1PB" self._header.set( "TFORM1", tform1, "data format of field: variable length array", after="TTYPE1", ) # Create the first column for the table. This column holds the # compressed data. col1 = Column(name=self._header["TTYPE1"], format=tform1) # Create the additional columns required for floating point # data and calculate the width of the output table. zbitpix = self._image_header["BITPIX"] if zbitpix < 0 and quantize_level != 0.0: # floating point image has 'COMPRESSED_DATA', # 'UNCOMPRESSED_DATA', 'ZSCALE', and 'ZZERO' columns (unless using # lossless compression, per CFITSIO) ncols = 4 # CFITSIO 3.28 and up automatically use the GZIP_COMPRESSED_DATA # store floating point data that couldn't be quantized, instead # of the UNCOMPRESSED_DATA column. 
There's no way to control # this behavior so the only way to determine which behavior will # be employed is via the CFITSIO version ttype2 = "GZIP_COMPRESSED_DATA" # The required format for the GZIP_COMPRESSED_DATA is actually # missing from the standard docs, but CFITSIO suggests it # should be 1PB, which is logical. tform2 = "1QB" if huge_hdu else "1PB" # Set up the second column for the table that will hold any # uncompressable data. self._header.set("TTYPE2", ttype2, "label for field 2", after="TFORM1") self._header.set( "TFORM2", tform2, "data format of field: variable length array", after="TTYPE2", ) col2 = Column(name=ttype2, format=tform2) # Set up the third column for the table that will hold # the scale values for quantized data. self._header.set("TTYPE3", "ZSCALE", "label for field 3", after="TFORM2") self._header.set( "TFORM3", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE3" ) col3 = Column(name=self._header["TTYPE3"], format=self._header["TFORM3"]) # Set up the fourth column for the table that will hold # the zero values for the quantized data. self._header.set("TTYPE4", "ZZERO", "label for field 4", after="TFORM3") self._header.set( "TFORM4", "1D", "data format of field: 8-byte DOUBLE", after="TTYPE4" ) after = "TFORM4" col4 = Column(name=self._header["TTYPE4"], format=self._header["TFORM4"]) # Create the ColDefs object for the table cols = ColDefs([col1, col2, col3, col4]) else: # default table has just one 'COMPRESSED_DATA' column ncols = 1 after = "TFORM1" # remove any header cards for the additional columns that # may be left over from the previous data to_remove = ["TTYPE2", "TFORM2", "TTYPE3", "TFORM3", "TTYPE4", "TFORM4"] for k in to_remove: try: del self._header[k] except KeyError: pass # Create the ColDefs object for the table cols = ColDefs([col1]) # Update the table header with the width of the table, the # number of fields in the table, the indicator for a compressed # image HDU, the data type of the image data and the number of # dimensions in the image data array. self._header.set("NAXIS1", cols.dtype.itemsize, "width of table in bytes") self._header.set( "TFIELDS", ncols, "number of fields in each row", after="GCOUNT" ) self._header.set( "ZIMAGE", True, "extension contains compressed image", after=after ) self._header.set("ZBITPIX", zbitpix, bitpix_comment, after="ZIMAGE") self._header.set( "ZNAXIS", self._image_header["NAXIS"], naxis_comment, after="ZBITPIX" ) # Strip the table header of all the ZNAZISn and ZTILEn keywords # that may be left over from the previous data for idx in itertools.count(1): try: del self._header["ZNAXIS" + str(idx)] del self._header["ZTILE" + str(idx)] except KeyError: break # Verify that any input tile size parameter is the appropriate # size to match the HDU's data. naxis = self._image_header["NAXIS"] if not tile_size: tile_size = [] elif len(tile_size) != naxis: warnings.warn( "Provided tile size not appropriate for the data. " "Default tile size will be used.", AstropyUserWarning, ) tile_size = [] # Set default tile dimensions for HCOMPRESS_1 if compression_type == "HCOMPRESS_1": if self._image_header["NAXIS1"] < 4 or self._image_header["NAXIS2"] < 4: raise ValueError("Hcompress minimum image dimension is 4 pixels") elif tile_size: if tile_size[0] < 4 or tile_size[1] < 4: # user specified tile size is too small raise ValueError("Hcompress minimum tile dimension is 4 pixels") major_dims = len([ts for ts in tile_size if ts > 1]) if major_dims > 2: raise ValueError( "HCOMPRESS can only support 2-dimensional tile sizes." 
"All but two of the tile_size dimensions must be set " "to 1." ) if tile_size and (tile_size[0] == 0 and tile_size[1] == 0): # compress the whole image as a single tile tile_size[0] = self._image_header["NAXIS1"] tile_size[1] = self._image_header["NAXIS2"] for i in range(2, naxis): # set all higher tile dimensions = 1 tile_size[i] = 1 elif not tile_size: # The Hcompress algorithm is inherently 2D in nature, so the # row by row tiling that is used for other compression # algorithms is not appropriate. If the image has less than 30 # rows, then the entire image will be compressed as a single # tile. Otherwise the tiles will consist of 16 rows of the # image. This keeps the tiles to a reasonable size, and it # also includes enough rows to allow good compression # efficiency. It the last tile of the image happens to contain # less than 4 rows, then find another tile size with between 14 # and 30 rows (preferably even), so that the last tile has at # least 4 rows. # 1st tile dimension is the row length of the image tile_size.append(self._image_header["NAXIS1"]) if self._image_header["NAXIS2"] <= 30: tile_size.append(self._image_header["NAXIS1"]) else: # look for another good tile dimension naxis2 = self._image_header["NAXIS2"] for dim in [16, 24, 20, 30, 28, 26, 22, 18, 14]: if naxis2 % dim == 0 or naxis2 % dim > 3: tile_size.append(dim) break else: tile_size.append(17) for i in range(2, naxis): # set all higher tile dimensions = 1 tile_size.append(1) # check if requested tile size causes the last tile to have # less than 4 pixels remain = self._image_header["NAXIS1"] % tile_size[0] # 1st dimen if remain > 0 and remain < 4: tile_size[0] += 1 # try increasing tile size by 1 remain = self._image_header["NAXIS1"] % tile_size[0] if remain > 0 and remain < 4: raise ValueError( "Last tile along 1st dimension has less than 4 pixels" ) remain = self._image_header["NAXIS2"] % tile_size[1] # 2nd dimen if remain > 0 and remain < 4: tile_size[1] += 1 # try increasing tile size by 1 remain = self._image_header["NAXIS2"] % tile_size[1] if remain > 0 and remain < 4: raise ValueError( "Last tile along 2nd dimension has less than 4 pixels" ) # Set up locations for writing the next cards in the header. last_znaxis = "ZNAXIS" if self._image_header["NAXIS"] > 0: after1 = "ZNAXIS1" else: after1 = "ZNAXIS" # Calculate the number of rows in the output table and # write the ZNAXISn and ZTILEn cards to the table header. nrows = 0 for idx, axis in enumerate(self._axes): naxis = "NAXIS" + str(idx + 1) znaxis = "ZNAXIS" + str(idx + 1) ztile = "ZTILE" + str(idx + 1) if tile_size and len(tile_size) >= idx + 1: ts = tile_size[idx] else: if ztile not in self._header: # Default tile size if not idx: ts = self._image_header["NAXIS1"] else: ts = 1 else: ts = self._header[ztile] tile_size.append(ts) if not nrows: nrows = (axis - 1) // ts + 1 else: nrows *= (axis - 1) // ts + 1 if image_header and naxis in image_header: self._header.set( znaxis, axis, image_header.comments[naxis], after=last_znaxis ) else: self._header.set( znaxis, axis, "length of original image axis", after=last_znaxis ) self._header.set(ztile, ts, "size of tiles to be compressed", after=after1) last_znaxis = znaxis after1 = ztile # Set the NAXIS2 header card in the table hdu to the number of # rows in the table. self._header.set("NAXIS2", nrows, "number of rows in table") self.columns = cols # Set the compression parameters in the table header. # First, setup the values to be used for the compression parameters # in case none were passed in. 
This will be either the value # already in the table header for that parameter or the default # value. for idx in itertools.count(1): zname = "ZNAME" + str(idx) if zname not in self._header: break zval = "ZVAL" + str(idx) if self._header[zname] == "NOISEBIT": if quantize_level is None: quantize_level = self._header[zval] if self._header[zname] == "SCALE ": if hcomp_scale is None: hcomp_scale = self._header[zval] if self._header[zname] == "SMOOTH ": if hcomp_smooth is None: hcomp_smooth = self._header[zval] if quantize_level is None: quantize_level = DEFAULT_QUANTIZE_LEVEL if hcomp_scale is None: hcomp_scale = DEFAULT_HCOMP_SCALE if hcomp_smooth is None: hcomp_smooth = DEFAULT_HCOMP_SCALE # Next, strip the table header of all the ZNAMEn and ZVALn keywords # that may be left over from the previous data for idx in itertools.count(1): zname = "ZNAME" + str(idx) if zname not in self._header: break zval = "ZVAL" + str(idx) del self._header[zname] del self._header[zval] # Finally, put the appropriate keywords back based on the # compression type. after_keyword = "ZCMPTYPE" idx = 1 if compression_type == "RICE_1": self._header.set( "ZNAME1", "BLOCKSIZE", "compression block size", after=after_keyword ) self._header.set( "ZVAL1", DEFAULT_BLOCK_SIZE, "pixels per block", after="ZNAME1" ) self._header.set( "ZNAME2", "BYTEPIX", "bytes per pixel (1, 2, 4, or 8)", after="ZVAL1" ) if self._header["ZBITPIX"] == 8: bytepix = 1 elif self._header["ZBITPIX"] == 16: bytepix = 2 else: bytepix = DEFAULT_BYTE_PIX self._header.set( "ZVAL2", bytepix, "bytes per pixel (1, 2, 4, or 8)", after="ZNAME2" ) after_keyword = "ZVAL2" idx = 3 elif compression_type == "HCOMPRESS_1": self._header.set( "ZNAME1", "SCALE", "HCOMPRESS scale factor", after=after_keyword ) self._header.set( "ZVAL1", hcomp_scale, "HCOMPRESS scale factor", after="ZNAME1" ) self._header.set( "ZNAME2", "SMOOTH", "HCOMPRESS smooth option", after="ZVAL1" ) self._header.set( "ZVAL2", hcomp_smooth, "HCOMPRESS smooth option", after="ZNAME2" ) after_keyword = "ZVAL2" idx = 3 if self._image_header["BITPIX"] < 0: # floating point image self._header.set( "ZNAME" + str(idx), "NOISEBIT", "floating point quantization level", after=after_keyword, ) self._header.set( "ZVAL" + str(idx), quantize_level, "floating point quantization level", after="ZNAME" + str(idx), ) # Add the dither method and seed if quantize_method: if quantize_method not in [ NO_DITHER, SUBTRACTIVE_DITHER_1, SUBTRACTIVE_DITHER_2, ]: name = QUANTIZE_METHOD_NAMES[DEFAULT_QUANTIZE_METHOD] warnings.warn( "Unknown quantization method provided. 
" "Default method ({}) used.".format(name) ) quantize_method = DEFAULT_QUANTIZE_METHOD if quantize_method == NO_DITHER: zquantiz_comment = "No dithering during quantization" else: zquantiz_comment = "Pixel Quantization Algorithm" self._header.set( "ZQUANTIZ", QUANTIZE_METHOD_NAMES[quantize_method], zquantiz_comment, after="ZVAL" + str(idx), ) else: # If the ZQUANTIZ keyword is missing the default is to assume # no dithering, rather than whatever DEFAULT_QUANTIZE_METHOD # is set to quantize_method = self._header.get("ZQUANTIZ", NO_DITHER) if isinstance(quantize_method, str): for k, v in QUANTIZE_METHOD_NAMES.items(): if v.upper() == quantize_method: quantize_method = k break else: quantize_method = NO_DITHER if quantize_method == NO_DITHER: if "ZDITHER0" in self._header: # If dithering isn't being used then there's no reason to # keep the ZDITHER0 keyword del self._header["ZDITHER0"] else: if dither_seed: dither_seed = self._generate_dither_seed(dither_seed) elif "ZDITHER0" in self._header: dither_seed = self._header["ZDITHER0"] else: dither_seed = self._generate_dither_seed(DEFAULT_DITHER_SEED) self._header.set( "ZDITHER0", dither_seed, "dithering offset when quantizing floats", after="ZQUANTIZ", ) if image_header: # Move SIMPLE card from the image header to the # table header as ZSIMPLE card. if "SIMPLE" in image_header: self._header.set( "ZSIMPLE", image_header["SIMPLE"], image_header.comments["SIMPLE"], before="ZBITPIX", ) # Move EXTEND card from the image header to the # table header as ZEXTEND card. if "EXTEND" in image_header: self._header.set( "ZEXTEND", image_header["EXTEND"], image_header.comments["EXTEND"] ) # Move BLOCKED card from the image header to the # table header as ZBLOCKED card. if "BLOCKED" in image_header: self._header.set( "ZBLOCKED", image_header["BLOCKED"], image_header.comments["BLOCKED"], ) # Move XTENSION card from the image header to the # table header as ZTENSION card. # Since we only handle compressed IMAGEs, ZTENSION should # always be IMAGE, even if the caller has passed in a header # for some other type of extension. if "XTENSION" in image_header: self._header.set( "ZTENSION", "IMAGE", image_header.comments["XTENSION"], before="ZBITPIX", ) # Move PCOUNT and GCOUNT cards from image header to the table # header as ZPCOUNT and ZGCOUNT cards. if "PCOUNT" in image_header: self._header.set( "ZPCOUNT", image_header["PCOUNT"], image_header.comments["PCOUNT"], after=last_znaxis, ) if "GCOUNT" in image_header: self._header.set( "ZGCOUNT", image_header["GCOUNT"], image_header.comments["GCOUNT"], after="ZPCOUNT", ) # Move CHECKSUM and DATASUM cards from the image header to the # table header as XHECKSUM and XDATASUM cards. if "CHECKSUM" in image_header: self._header.set( "ZHECKSUM", image_header["CHECKSUM"], image_header.comments["CHECKSUM"], ) if "DATASUM" in image_header: self._header.set( "ZDATASUM", image_header["DATASUM"], image_header.comments["DATASUM"], ) else: # Move XTENSION card from the image header to the # table header as ZTENSION card. # Since we only handle compressed IMAGEs, ZTENSION should # always be IMAGE, even if the caller has passed in a header # for some other type of extension. if "XTENSION" in self._image_header: self._header.set( "ZTENSION", "IMAGE", self._image_header.comments["XTENSION"], before="ZBITPIX", ) # Move PCOUNT and GCOUNT cards from image header to the table # header as ZPCOUNT and ZGCOUNT cards. 
if "PCOUNT" in self._image_header: self._header.set( "ZPCOUNT", self._image_header["PCOUNT"], self._image_header.comments["PCOUNT"], after=last_znaxis, ) if "GCOUNT" in self._image_header: self._header.set( "ZGCOUNT", self._image_header["GCOUNT"], self._image_header.comments["GCOUNT"], after="ZPCOUNT", ) # When we have an image checksum we need to ensure that the same # number of blank cards exist in the table header as there were in # the image header. This allows those blank cards to be carried # over to the image header when the hdu is uncompressed. if "ZHECKSUM" in self._header: required_blanks = image_header._countblanks() image_blanks = self._image_header._countblanks() table_blanks = self._header._countblanks() for _ in range(required_blanks - image_blanks): self._image_header.append() table_blanks += 1 for _ in range(required_blanks - table_blanks): self._header.append() @lazyproperty def data(self): # The data attribute is the image data (not the table data). data = decompress_hdu(self) if data is None: return data # Scale the data if necessary if self._orig_bzero != 0 or self._orig_bscale != 1: new_dtype = self._dtype_for_bitpix() data = np.array(data, dtype=new_dtype) if "BLANK" in self._header: blanks = data == np.array(self._header["BLANK"], dtype="int32") else: blanks = None if self._bscale != 1: np.multiply(data, self._bscale, data) if self._bzero != 0: # We have to explicitly cast self._bzero to prevent numpy from # raising an error when doing self.data += self._bzero, and we # do this instead of self.data = self.data + self._bzero to # avoid doubling memory usage. np.add(data, self._bzero, out=data, casting="unsafe") if blanks is not None: data = np.where(blanks, np.nan, data) # Right out of _ImageBaseHDU.data self._update_header_scale_info(data.dtype) return data @data.setter def data(self, data): if (data is not None) and ( not isinstance(data, np.ndarray) or data.dtype.fields is not None ): raise TypeError( "CompImageHDU data has incorrect type:{}; dtype.fields = {}".format( type(data), data.dtype.fields ) ) @lazyproperty def compressed_data(self): # First we will get the table data (the compressed # data) from the file, if there is any. 
compressed_data = super().data if isinstance(compressed_data, np.rec.recarray): # Make sure not to use 'del self.data' so we don't accidentally # go through the self.data.fdel and close the mmap underlying # the compressed_data array del self.__dict__["data"] return compressed_data else: # This will actually set self.compressed_data with the # pre-allocated space for the compression data; this is something I # might do away with in the future self._update_compressed_data() return self.compressed_data @compressed_data.deleter def compressed_data(self): # Deleting the compressed_data attribute has to be handled # with a little care to prevent a reference leak # First delete the ._coldefs attributes under it to break a possible # reference cycle if "compressed_data" in self.__dict__: del self.__dict__["compressed_data"]._coldefs # Now go ahead and delete from self.__dict__; normally # lazyproperty.__delete__ does this for us, but we can prempt it to # do some additional cleanup del self.__dict__["compressed_data"] # If this file was mmap'd, numpy.memmap will hold open a file # handle until the underlying mmap object is garbage-collected; # since this reference leak can sometimes hang around longer than # welcome go ahead and force a garbage collection gc.collect() @property def shape(self): """ Shape of the image array--should be equivalent to ``self.data.shape``. """ # Determine from the values read from the header return tuple(reversed(self._axes)) @lazyproperty def header(self): # The header attribute is the header for the image data. It # is not actually stored in the object dictionary. Instead, # the _image_header is stored. If the _image_header attribute # has already been defined we just return it. If not, we must # create it from the table header (the _header attribute). if hasattr(self, "_image_header"): return self._image_header # Clean up any possible doubled EXTNAME keywords that use # the default. Do this on the original header to ensure # duplicates are removed cleanly. self._remove_unnecessary_default_extnames(self._header) # Start with a copy of the table header. image_header = self._header.copy() # Delete cards that are related to the table. And move # the values of those cards that relate to the image from # their corresponding table cards. These include # ZBITPIX -> BITPIX, ZNAXIS -> NAXIS, and ZNAXISn -> NAXISn. 
# (Note: Used set here instead of list in case there are any duplicate # keywords, which there may be in some pathological cases: # https://github.com/astropy/astropy/issues/2750 for keyword in set(image_header): if CompImageHeader._is_reserved_keyword(keyword, warn=False): del image_header[keyword] if "ZSIMPLE" in self._header: image_header.set( "SIMPLE", self._header["ZSIMPLE"], self._header.comments["ZSIMPLE"], before=0, ) del image_header["XTENSION"] elif "ZTENSION" in self._header: if self._header["ZTENSION"] != "IMAGE": warnings.warn( "ZTENSION keyword in compressed extension != 'IMAGE'", AstropyUserWarning, ) image_header.set( "XTENSION", "IMAGE", self._header.comments["ZTENSION"], before=0 ) else: image_header.set("XTENSION", "IMAGE", before=0) image_header.set( "BITPIX", self._header["ZBITPIX"], self._header.comments["ZBITPIX"], before=1, ) image_header.set( "NAXIS", self._header["ZNAXIS"], self._header.comments["ZNAXIS"], before=2 ) last_naxis = "NAXIS" for idx in range(image_header["NAXIS"]): znaxis = "ZNAXIS" + str(idx + 1) naxis = znaxis[1:] image_header.set( naxis, self._header[znaxis], self._header.comments[znaxis], after=last_naxis, ) last_naxis = naxis # Delete any other spurious NAXISn keywords: naxis = image_header["NAXIS"] for keyword in list(image_header["NAXIS?*"]): try: n = int(keyword[5:]) except Exception: continue if n > naxis: del image_header[keyword] # Although PCOUNT and GCOUNT are considered mandatory for IMAGE HDUs, # ZPCOUNT and ZGCOUNT are optional, probably because for IMAGE HDUs # their values are always 0 and 1 respectively if "ZPCOUNT" in self._header: image_header.set( "PCOUNT", self._header["ZPCOUNT"], self._header.comments["ZPCOUNT"], after=last_naxis, ) else: image_header.set("PCOUNT", 0, after=last_naxis) if "ZGCOUNT" in self._header: image_header.set( "GCOUNT", self._header["ZGCOUNT"], self._header.comments["ZGCOUNT"], after="PCOUNT", ) else: image_header.set("GCOUNT", 1, after="PCOUNT") if "ZEXTEND" in self._header: image_header.set( "EXTEND", self._header["ZEXTEND"], self._header.comments["ZEXTEND"] ) if "ZBLOCKED" in self._header: image_header.set( "BLOCKED", self._header["ZBLOCKED"], self._header.comments["ZBLOCKED"] ) # Move the ZHECKSUM and ZDATASUM cards to the image header # as CHECKSUM and DATASUM if "ZHECKSUM" in self._header: image_header.set( "CHECKSUM", self._header["ZHECKSUM"], self._header.comments["ZHECKSUM"] ) if "ZDATASUM" in self._header: image_header.set( "DATASUM", self._header["ZDATASUM"], self._header.comments["ZDATASUM"] ) # Remove the EXTNAME card if the value in the table header # is the default value of COMPRESSED_IMAGE. if "EXTNAME" in image_header and image_header["EXTNAME"] == self._default_name: del image_header["EXTNAME"] # Remove the PCOUNT GCOUNT cards if the uncompressed header is # from a primary HDU if "SIMPLE" in image_header: del image_header["PCOUNT"] del image_header["GCOUNT"] # Look to see if there are any blank cards in the table # header. If there are, there should be the same number # of blank cards in the image header. Add blank cards to # the image header to make it so. 
table_blanks = self._header._countblanks() image_blanks = image_header._countblanks() for _ in range(table_blanks - image_blanks): image_header.append() # Create the CompImageHeader that syncs with the table header, and save # it off to self._image_header so it can be referenced later # unambiguously self._image_header = CompImageHeader(self._header, image_header) return self._image_header def _summary(self): """ Summarize the HDU: name, dimensions, and formats. """ class_name = self.__class__.__name__ # if data is touched, use data info. if self._data_loaded: if self.data is None: _shape, _format = (), "" else: # the shape will be in the order of NAXIS's which is the # reverse of the numarray shape _shape = list(self.data.shape) _format = self.data.dtype.name _shape.reverse() _shape = tuple(_shape) _format = _format[_format.rfind(".") + 1 :] # if data is not touched yet, use header info. else: _shape = () for idx in range(self.header["NAXIS"]): _shape += (self.header["NAXIS" + str(idx + 1)],) _format = BITPIX2DTYPE[self.header["BITPIX"]] return (self.name, self.ver, class_name, len(self.header), _shape, _format) def _update_compressed_data(self): """ Compress the image data so that it may be written to a file. """ # Check to see that the image_header matches the image data image_bitpix = DTYPE2BITPIX[self.data.dtype.name] if image_bitpix != self._orig_bitpix or self.data.shape != self.shape: self._update_header_data(self.header) # TODO: This is copied right out of _ImageBaseHDU._writedata_internal; # it would be cool if we could use an internal ImageHDU and use that to # write to a buffer for compression or something. See ticket #88 # deal with unsigned integer 16, 32 and 64 data old_data = self.data if _is_pseudo_integer(self.data.dtype): # Convert the unsigned array to signed self.data = np.array( self.data - _pseudo_zero(self.data.dtype), dtype=f"=i{self.data.dtype.itemsize}", ) try: nrows = self._header["NAXIS2"] tbsize = self._header["NAXIS1"] * nrows self._header["PCOUNT"] = 0 if "THEAP" in self._header: del self._header["THEAP"] self._theap = tbsize # First delete the original compressed data, if it exists del self.compressed_data # Make sure that the data is contiguous otherwise CFITSIO # will not write the expected data self.data = np.ascontiguousarray(self.data) # Compress the data. # compress_hdu returns the size of the heap for the written # compressed image table heapsize, self.compressed_data = compress_hdu(self) finally: self.data = old_data # CFITSIO will write the compressed data in big-endian order dtype = self.columns.dtype.newbyteorder(">") buf = self.compressed_data compressed_data = buf[: self._theap].view(dtype=dtype, type=np.rec.recarray) self.compressed_data = compressed_data.view(FITS_rec) self.compressed_data._coldefs = self.columns self.compressed_data._heapoffset = self._theap self.compressed_data._heapsize = heapsize def scale(self, type=None, option="old", bscale=1, bzero=0): """ Scale image data by using ``BSCALE`` and ``BZERO``. Calling this method will scale ``self.data`` and update the keywords of ``BSCALE`` and ``BZERO`` in ``self._header`` and ``self._image_header``. This method should only be used right before writing to the output file, as the data will be scaled and is therefore not very usable after the call. Parameters ---------- type : str, optional destination data type, use a string representing a numpy dtype name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'`` etc.). If is `None`, use the current data type. 
option : str, optional how to scale the data: if ``"old"``, use the original ``BSCALE`` and ``BZERO`` values when the data was read/created. If ``"minmax"``, use the minimum and maximum of the data to scale. The option will be overwritten by any user-specified bscale/bzero values. bscale, bzero : int, optional user specified ``BSCALE`` and ``BZERO`` values. """ if self.data is None: return # Determine the destination (numpy) data type if type is None: type = BITPIX2DTYPE[self._bitpix] _type = getattr(np, type) # Determine how to scale the data # bscale and bzero takes priority if bscale != 1 or bzero != 0: _scale = bscale _zero = bzero else: if option == "old": _scale = self._orig_bscale _zero = self._orig_bzero elif option == "minmax": if isinstance(_type, np.floating): _scale = 1 _zero = 0 else: _min = np.minimum.reduce(self.data.flat) _max = np.maximum.reduce(self.data.flat) if _type == np.uint8: # uint8 case _zero = _min _scale = (_max - _min) / (2.0**8 - 1) else: _zero = (_max + _min) / 2.0 # throw away -2^N _scale = (_max - _min) / (2.0 ** (8 * _type.bytes) - 2) # Do the scaling if _zero != 0: # We have to explicitly cast self._bzero to prevent numpy from # raising an error when doing self.data -= _zero, and we # do this instead of self.data = self.data - _zero to # avoid doubling memory usage. np.subtract(self.data, _zero, out=self.data, casting="unsafe") self.header["BZERO"] = _zero else: # Delete from both headers for header in (self.header, self._header): with suppress(KeyError): del header["BZERO"] if _scale != 1: self.data /= _scale self.header["BSCALE"] = _scale else: for header in (self.header, self._header): with suppress(KeyError): del header["BSCALE"] if self.data.dtype.type != _type: self.data = np.array(np.around(self.data), dtype=_type) # 0.7.7.1 # Update the BITPIX Card to match the data self._bitpix = DTYPE2BITPIX[self.data.dtype.name] self._bzero = self.header.get("BZERO", 0) self._bscale = self.header.get("BSCALE", 1) # Update BITPIX for the image header specifically # TODO: Make this more clear by using self._image_header, but only once # this has been fixed so that the _image_header attribute is guaranteed # to be valid self.header["BITPIX"] = self._bitpix # Update the table header to match the scaled data self._update_header_data(self.header) # Since the image has been manually scaled, the current # bitpix/bzero/bscale now serve as the 'original' scaling of the image, # as though the original image has been completely replaced self._orig_bitpix = self._bitpix self._orig_bzero = self._bzero self._orig_bscale = self._bscale def _prewriteto(self, checksum=False, inplace=False): if self._scale_back: self.scale(BITPIX2DTYPE[self._orig_bitpix]) if self._has_data: self._update_compressed_data() # Use methods in the superclass to update the header with # scale/checksum keywords based on the data type of the image data self._update_pseudo_int_scale_keywords() # Shove the image header and data into a new ImageHDU and use that # to compute the image checksum image_hdu = ImageHDU(data=self.data, header=self.header) image_hdu._update_checksum(checksum) if "CHECKSUM" in image_hdu.header: # This will also pass through to the ZHECKSUM keyword and # ZDATASUM keyword self._image_header.set( "CHECKSUM", image_hdu.header["CHECKSUM"], image_hdu.header.comments["CHECKSUM"], ) if "DATASUM" in image_hdu.header: self._image_header.set( "DATASUM", image_hdu.header["DATASUM"], image_hdu.header.comments["DATASUM"], ) # Store a temporary backup of self.data in a different attribute; # see 
below self._imagedata = self.data # Now we need to perform an ugly hack to set the compressed data as # the .data attribute on the HDU so that the call to _writedata # handles it properly self.__dict__["data"] = self.compressed_data return super()._prewriteto(checksum=checksum, inplace=inplace) def _writeheader(self, fileobj): """ Bypasses `BinTableHDU._writeheader()` which updates the header with metadata about the data that is meaningless here; another reason why this class maybe shouldn't inherit directly from BinTableHDU... """ return ExtensionHDU._writeheader(self, fileobj) def _writedata(self, fileobj): """ Wrap the basic ``_writedata`` method to restore the ``.data`` attribute to the uncompressed image data in the case of an exception. """ try: return super()._writedata(fileobj) finally: # Restore the .data attribute to its rightful value (if any) if hasattr(self, "_imagedata"): self.__dict__["data"] = self._imagedata del self._imagedata else: del self.data def _close(self, closed=True): super()._close(closed=closed) # Also make sure to close access to the compressed data mmaps if ( closed and self._data_loaded and _get_array_mmap(self.compressed_data) is not None ): del self.compressed_data # TODO: This was copied right out of _ImageBaseHDU; get rid of it once we # find a way to rewrite this class as either a subclass or wrapper for an # ImageHDU def _dtype_for_bitpix(self): """ Determine the dtype that the data should be converted to depending on the BITPIX value in the header, and possibly on the BSCALE value as well. Returns None if there should not be any change. """ bitpix = self._orig_bitpix # Handle possible conversion to uints if enabled if self._uint and self._orig_bscale == 1: for bits, dtype in ( (16, np.dtype("uint16")), (32, np.dtype("uint32")), (64, np.dtype("uint64")), ): if bitpix == bits and self._orig_bzero == 1 << (bits - 1): return dtype if bitpix > 16: # scale integers to Float64 return np.dtype("float64") elif bitpix > 0: # scale integers to Float32 return np.dtype("float32") def _update_header_scale_info(self, dtype=None): if not self._do_not_scale_image_data and not ( self._orig_bzero == 0 and self._orig_bscale == 1 ): for keyword in ["BSCALE", "BZERO"]: # Make sure to delete from both the image header and the table # header; later this will be streamlined for header in (self.header, self._header): with suppress(KeyError): del header[keyword] # Since _update_header_scale_info can, currently, be # called *after* _prewriteto(), replace these with # blank cards so the header size doesn't change header.append() if dtype is None: dtype = self._dtype_for_bitpix() if dtype is not None: self.header["BITPIX"] = DTYPE2BITPIX[dtype.name] self._bzero = 0 self._bscale = 1 self._bitpix = self.header["BITPIX"] def _generate_dither_seed(self, seed): if not _is_int(seed): raise TypeError("Seed must be an integer") if not -1 <= seed <= 10000: raise ValueError( "Seed for random dithering must be either between 1 and " "10000 inclusive, 0 for autogeneration from the system " "clock, or -1 for autogeneration from a checksum of the first " "image tile (got {})".format(seed) ) if seed == DITHER_SEED_CHECKSUM: # Determine the tile dimensions from the ZTILEn keywords naxis = self._header["ZNAXIS"] tile_dims = [self._header[f"ZTILE{idx + 1}"] for idx in range(naxis)] tile_dims.reverse() # Get the first tile by using the tile dimensions as the end # indices of slices (starting from 0) first_tile = self.data[tuple(slice(d) for d in tile_dims)] # The checksum algorithm used is literally 
just the sum of the bytes # of the tile data (not its actual floating point values). Integer # overflow is irrelevant. csum = first_tile.view(dtype="uint8").sum() # Since CFITSIO uses an unsigned long (which may be different on # different platforms) go ahead and truncate the sum to its # unsigned long value and take the result modulo 10000 return (ctypes.c_ulong(csum).value % 10000) + 1 elif seed == DITHER_SEED_CLOCK: # This isn't exactly the same algorithm as CFITSIO, but that's okay # since the result is meant to be arbitrary. The primary difference # is that CFITSIO incorporates the HDU number into the result in # the hopes of heading off the possibility of the same seed being # generated for two HDUs at the same time. Here instead we just # add in the HDU object's id return ( (sum(int(x) for x in math.modf(time.time())) + id(self)) % 10000 ) + 1 else: return seed
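# A minimal illustrative sketch (not used by this module; the helper name and
# sample tile below are hypothetical): the checksum-based dither seed above
# boils down to summing the raw bytes of the first tile, truncating to an
# unsigned long, and mapping the result into the range 1..10000.  It relies on
# the module-level ``ctypes`` and ``numpy`` imports already present here.
def _example_checksum_dither_seed(tile):
    """Reproduce the DITHER_SEED_CHECKSUM computation for a plain ndarray."""
    # Sum of the raw bytes of the tile, independent of its actual dtype
    csum = tile.view(dtype="uint8").sum()
    # Truncate to an unsigned long as CFITSIO would, then map into 1..10000
    return (ctypes.c_ulong(int(csum)).value % 10000) + 1
# e.g. _example_checksum_dither_seed(np.arange(36, dtype=np.float32).reshape(6, 6))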
a237902ce0d26f002282eed7932ddec71908799c782b1a53b0ec3e9ef21c9afa
# Licensed under a 3-clause BSD style license - see PYFITS.rst import datetime import os import sys import warnings from contextlib import suppress from inspect import Parameter, signature import numpy as np from astropy.io.fits import conf from astropy.io.fits.file import _File from astropy.io.fits.header import Header, _BasicHeader, _DelayedHeader, _pad_length from astropy.io.fits.util import ( _extract_number, _free_space_check, _get_array_mmap, _is_int, _is_pseudo_integer, _pseudo_zero, decode_ascii, first, itersubclasses, ) from astropy.io.fits.verify import _ErrList, _Verify from astropy.utils import lazyproperty from astropy.utils.exceptions import AstropyUserWarning __all__ = [ "DELAYED", # classes "InvalidHDUException", "ExtensionHDU", "NonstandardExtHDU", ] class _Delayed: pass DELAYED = _Delayed() BITPIX2DTYPE = { 8: "uint8", 16: "int16", 32: "int32", 64: "int64", -32: "float32", -64: "float64", } """Maps FITS BITPIX values to Numpy dtype names.""" DTYPE2BITPIX = { "int8": 8, "uint8": 8, "int16": 16, "uint16": 16, "int32": 32, "uint32": 32, "int64": 64, "uint64": 64, "float32": -32, "float64": -64, } """ Maps Numpy dtype names to FITS BITPIX values (this includes unsigned integers, with the assumption that the pseudo-unsigned integer convention will be used in this case. """ class InvalidHDUException(Exception): """ A custom exception class used mainly to signal to _BaseHDU.__new__ that an HDU cannot possibly be considered valid, and must be assumed to be corrupted. """ def _hdu_class_from_header(cls, header): """ Iterates through the subclasses of _BaseHDU and uses that class's match_header() method to determine which subclass to instantiate. It's important to be aware that the class hierarchy is traversed in a depth-last order. Each match_header() should identify an HDU type as uniquely as possible. Abstract types may choose to simply return False or raise NotImplementedError to be skipped. If any unexpected exceptions are raised while evaluating match_header(), the type is taken to be _CorruptedHDU. Used primarily by _BaseHDU._readfrom_internal and _BaseHDU._from_data to find an appropriate HDU class to use based on values in the header. """ klass = cls # By default, if no subclasses are defined if header: for c in reversed(list(itersubclasses(cls))): try: # HDU classes built into astropy.io.fits are always considered, # but extension HDUs must be explicitly registered if not ( c.__module__.startswith("astropy.io.fits.") or c in cls._hdu_registry ): continue if c.match_header(header): klass = c break except NotImplementedError: continue except Exception as exc: warnings.warn( "An exception occurred matching an HDU header to the " "appropriate HDU type: {}".format(exc), AstropyUserWarning, ) warnings.warn( "The HDU will be treated as corrupted.", AstropyUserWarning ) klass = _CorruptedHDU del exc break return klass # TODO: Come up with a better __repr__ for HDUs (and for HDULists, for that # matter) class _BaseHDU: """Base class for all HDU (header data unit) classes.""" _hdu_registry = set() # This HDU type is part of the FITS standard _standard = True # Byte to use for padding out blocks _padding_byte = "\x00" _default_name = "" # _header uses a descriptor to delay the loading of the fits.Header object # until it is necessary. 
_header = _DelayedHeader() def __init__(self, data=None, header=None, *args, **kwargs): if header is None: header = Header() self._header = header self._header_str = None self._file = None self._buffer = None self._header_offset = None self._data_offset = None self._data_size = None # This internal variable is used to track whether the data attribute # still points to the same data array as when the HDU was originally # created (this does not track whether the data is actually the same # content-wise) self._data_replaced = False self._data_needs_rescale = False self._new = True self._output_checksum = False if "DATASUM" in self._header and "CHECKSUM" not in self._header: self._output_checksum = "datasum" elif "CHECKSUM" in self._header: self._output_checksum = True def __init_subclass__(cls, **kwargs): # Add the same data.deleter to all HDUs with a data property. # It's unfortunate, but there's otherwise no straightforward way # that a property can inherit setters/deleters of the property of the # same name on base classes. data_prop = cls.__dict__.get("data", None) if isinstance(data_prop, (lazyproperty, property)) and data_prop.fdel is None: # Don't do anything if the class has already explicitly # set the deleter for its data property def data(self): # The deleter if self._file is not None and self._data_loaded: # sys.getrefcount is CPython specific and not on PyPy. has_getrefcount = hasattr(sys, "getrefcount") if has_getrefcount: data_refcount = sys.getrefcount(self.data) # Manually delete *now* so that FITS_rec.__del__ # cleanup can happen if applicable del self.__dict__["data"] # Don't even do this unless the *only* reference to the # .data array was the one we're deleting by deleting # this attribute; if any other references to the array # are hanging around (perhaps the user ran ``data = # hdu.data``) don't even consider this: if has_getrefcount and data_refcount == 2: self._file._maybe_close_mmap() cls.data = data_prop.deleter(data) return super().__init_subclass__(**kwargs) @property def header(self): return self._header @header.setter def header(self, value): self._header = value @property def name(self): # Convert the value to a string to be flexible in some pathological # cases (see ticket #96) return str(self._header.get("EXTNAME", self._default_name)) @name.setter def name(self, value): if not isinstance(value, str): raise TypeError("'name' attribute must be a string") if not conf.extension_name_case_sensitive: value = value.upper() if "EXTNAME" in self._header: self._header["EXTNAME"] = value else: self._header["EXTNAME"] = (value, "extension name") @property def ver(self): return self._header.get("EXTVER", 1) @ver.setter def ver(self, value): if not _is_int(value): raise TypeError("'ver' attribute must be an integer") if "EXTVER" in self._header: self._header["EXTVER"] = value else: self._header["EXTVER"] = (value, "extension value") @property def level(self): return self._header.get("EXTLEVEL", 1) @level.setter def level(self, value): if not _is_int(value): raise TypeError("'level' attribute must be an integer") if "EXTLEVEL" in self._header: self._header["EXTLEVEL"] = value else: self._header["EXTLEVEL"] = (value, "extension level") @property def is_image(self): return self.name == "PRIMARY" or ( "XTENSION" in self._header and ( self._header["XTENSION"] == "IMAGE" or ( self._header["XTENSION"] == "BINTABLE" and "ZIMAGE" in self._header and self._header["ZIMAGE"] is True ) ) ) @property def _data_loaded(self): return "data" in self.__dict__ and self.data is not DELAYED 
@property def _has_data(self): return self._data_loaded and self.data is not None @classmethod def register_hdu(cls, hducls): cls._hdu_registry.add(hducls) @classmethod def unregister_hdu(cls, hducls): if hducls in cls._hdu_registry: cls._hdu_registry.remove(hducls) @classmethod def match_header(cls, header): raise NotImplementedError @classmethod def fromstring(cls, data, checksum=False, ignore_missing_end=False, **kwargs): """ Creates a new HDU object of the appropriate type from a string containing the HDU's entire header and, optionally, its data. Note: When creating a new HDU from a string without a backing file object, the data of that HDU may be read-only. It depends on whether the underlying string was an immutable Python str/bytes object, or some kind of read-write memory buffer such as a `memoryview`. Parameters ---------- data : str, bytearray, memoryview, ndarray A byte string containing the HDU's header and data. checksum : bool, optional Check the HDU's checksum and/or datasum. ignore_missing_end : bool, optional Ignore a missing end card in the header data. Note that without the end card the end of the header may be ambiguous and resulted in a corrupt HDU. In this case the assumption is that the first 2880 block that does not begin with valid FITS header data is the beginning of the data. **kwargs : optional May consist of additional keyword arguments specific to an HDU type--these correspond to keywords recognized by the constructors of different HDU classes such as `PrimaryHDU`, `ImageHDU`, or `BinTableHDU`. Any unrecognized keyword arguments are simply ignored. """ return cls._readfrom_internal( data, checksum=checksum, ignore_missing_end=ignore_missing_end, **kwargs ) @classmethod def readfrom(cls, fileobj, checksum=False, ignore_missing_end=False, **kwargs): """ Read the HDU from a file. Normally an HDU should be opened with :func:`open` which reads the entire HDU list in a FITS file. But this method is still provided for symmetry with :func:`writeto`. Parameters ---------- fileobj : file-like Input FITS file. The file's seek pointer is assumed to be at the beginning of the HDU. checksum : bool If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values (when present in the HDU header) match the header and data of all HDU's in the file. ignore_missing_end : bool Do not issue an exception when opening a file that is missing an ``END`` card in the last header. """ # TODO: Figure out a way to make it possible for the _File # constructor to be a noop if the argument is already a _File if not isinstance(fileobj, _File): fileobj = _File(fileobj) hdu = cls._readfrom_internal( fileobj, checksum=checksum, ignore_missing_end=ignore_missing_end, **kwargs ) # If the checksum had to be checked the data may have already been read # from the file, in which case we don't want to seek relative fileobj.seek(hdu._data_offset + hdu._data_size, os.SEEK_SET) return hdu def writeto(self, name, output_verify="exception", overwrite=False, checksum=False): """ Write the HDU to a new file. This is a convenience method to provide a user easier output interface if only one HDU needs to be written to a file. Parameters ---------- name : path-like or file-like Output FITS file. If the file object is already opened, it must be opened in a writeable mode. output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. 
May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. checksum : bool When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header of the HDU when written to the file. """ from .hdulist import HDUList hdulist = HDUList([self]) hdulist.writeto(name, output_verify, overwrite=overwrite, checksum=checksum) @classmethod def _from_data(cls, data, header, **kwargs): """ Instantiate the HDU object after guessing the HDU class from the FITS Header. """ klass = _hdu_class_from_header(cls, header) return klass(data=data, header=header, **kwargs) @classmethod def _readfrom_internal( cls, data, header=None, checksum=False, ignore_missing_end=False, **kwargs ): """ Provides the bulk of the internal implementation for readfrom and fromstring. For some special cases, supports using a header that was already created, and just using the input data for the actual array data. """ hdu_buffer = None hdu_fileobj = None header_offset = 0 if isinstance(data, _File): if header is None: header_offset = data.tell() try: # First we try to read the header with the fast parser # from _BasicHeader, which will read only the standard # 8 character keywords to get the structural keywords # that are needed to build the HDU object. header_str, header = _BasicHeader.fromfile(data) except Exception: # If the fast header parsing failed, then fallback to # the classic Header parser, which has better support # and reporting for the various issues that can be found # in the wild. data.seek(header_offset) header = Header.fromfile(data, endcard=not ignore_missing_end) hdu_fileobj = data data_offset = data.tell() # *after* reading the header else: try: # Test that the given object supports the buffer interface by # ensuring an ndarray can be created from it np.ndarray((), dtype="ubyte", buffer=data) except TypeError: raise TypeError( "The provided object {!r} does not contain an underlying " "memory buffer. fromstring() requires an object that " "supports the buffer interface such as bytes, buffer, " "memoryview, ndarray, etc. This restriction is to ensure " "that efficient access to the array/table data is possible.".format( data ) ) if header is None: def block_iter(nbytes): idx = 0 while idx < len(data): yield data[idx : idx + nbytes] idx += nbytes header_str, header = Header._from_blocks( block_iter, True, "", not ignore_missing_end, True ) if len(data) > len(header_str): hdu_buffer = data elif data: hdu_buffer = data header_offset = 0 data_offset = len(header_str) # Determine the appropriate arguments to pass to the constructor from # self._kwargs. self._kwargs contains any number of optional arguments # that may or may not be valid depending on the HDU type cls = _hdu_class_from_header(cls, header) sig = signature(cls.__init__) new_kwargs = kwargs.copy() if Parameter.VAR_KEYWORD not in (x.kind for x in sig.parameters.values()): # If __init__ accepts arbitrary keyword arguments, then we can go # ahead and pass all keyword arguments; otherwise we need to delete # any that are invalid for key in kwargs: if key not in sig.parameters: del new_kwargs[key] try: hdu = cls(data=DELAYED, header=header, **new_kwargs) except TypeError: # This may happen because some HDU class (e.g. 
GroupsHDU) wants # to set a keyword on the header, which is not possible with the # _BasicHeader. While HDU classes should not need to modify the # header in general, sometimes this is needed to fix it. So in # this case we build a full Header and try again to create the # HDU object. if isinstance(header, _BasicHeader): header = Header.fromstring(header_str) hdu = cls(data=DELAYED, header=header, **new_kwargs) else: raise # One of these may be None, depending on whether the data came from a # file or a string buffer--later this will be further abstracted hdu._file = hdu_fileobj hdu._buffer = hdu_buffer hdu._header_offset = header_offset # beginning of the header area hdu._data_offset = data_offset # beginning of the data area # data area size, including padding size = hdu.size hdu._data_size = size + _pad_length(size) if isinstance(hdu._header, _BasicHeader): # Delete the temporary _BasicHeader. # We need to do this before an eventual checksum computation, # since it needs to modify temporarily the header # # The header string is stored in the HDU._header_str attribute, # so that it can be used directly when we need to create the # classic Header object, without having to parse again the file. del hdu._header hdu._header_str = header_str # Checksums are not checked on invalid HDU types if checksum and checksum != "remove" and isinstance(hdu, _ValidHDU): hdu._verify_checksum_datasum() return hdu def _get_raw_data(self, shape, code, offset): """ Return raw array from either the HDU's memory buffer or underlying file. """ if isinstance(shape, int): shape = (shape,) if self._buffer: return np.ndarray(shape, dtype=code, buffer=self._buffer, offset=offset) elif self._file: return self._file.readarray(offset=offset, dtype=code, shape=shape) else: return None # TODO: Rework checksum handling so that it's not necessary to add a # checksum argument here # TODO: The BaseHDU class shouldn't even handle checksums since they're # only implemented on _ValidHDU... def _prewriteto(self, checksum=False, inplace=False): self._update_pseudo_int_scale_keywords() # Handle checksum self._update_checksum(checksum) def _update_pseudo_int_scale_keywords(self): """ If the data is signed int 8, unsigned int 16, 32, or 64, add BSCALE/BZERO cards to header. """ if self._has_data and self._standard and _is_pseudo_integer(self.data.dtype): # CompImageHDUs need TFIELDS immediately after GCOUNT, # so BSCALE has to go after TFIELDS if it exists. if "TFIELDS" in self._header: self._header.set("BSCALE", 1, after="TFIELDS") elif "GCOUNT" in self._header: self._header.set("BSCALE", 1, after="GCOUNT") else: self._header.set("BSCALE", 1) self._header.set("BZERO", _pseudo_zero(self.data.dtype), after="BSCALE") def _update_checksum( self, checksum, checksum_keyword="CHECKSUM", datasum_keyword="DATASUM" ): """Update the 'CHECKSUM' and 'DATASUM' keywords in the header (or keywords with equivalent semantics given by the ``checksum_keyword`` and ``datasum_keyword`` arguments--see for example ``CompImageHDU`` for an example of why this might need to be overridden). 
""" # If the data is loaded it isn't necessarily 'modified', but we have no # way of knowing for sure modified = self._header._modified or self._data_loaded if checksum == "remove": if checksum_keyword in self._header: del self._header[checksum_keyword] if datasum_keyword in self._header: del self._header[datasum_keyword] elif ( modified or self._new or ( checksum and ( "CHECKSUM" not in self._header or "DATASUM" not in self._header or not self._checksum_valid or not self._datasum_valid ) ) ): if checksum == "datasum": self.add_datasum(datasum_keyword=datasum_keyword) elif checksum: self.add_checksum( checksum_keyword=checksum_keyword, datasum_keyword=datasum_keyword ) def _postwriteto(self): # If data is unsigned integer 16, 32 or 64, remove the # BSCALE/BZERO cards if self._has_data and self._standard and _is_pseudo_integer(self.data.dtype): for keyword in ("BSCALE", "BZERO"): with suppress(KeyError): del self._header[keyword] def _writeheader(self, fileobj): offset = 0 with suppress(AttributeError, OSError): offset = fileobj.tell() self._header.tofile(fileobj) try: size = fileobj.tell() - offset except (AttributeError, OSError): size = len(str(self._header)) return offset, size def _writedata(self, fileobj): size = 0 fileobj.flush() try: offset = fileobj.tell() except (AttributeError, OSError): offset = 0 if self._data_loaded or self._data_needs_rescale: if self.data is not None: size += self._writedata_internal(fileobj) # pad the FITS data block # to avoid a bug in the lustre filesystem client, don't # write zero-byte objects if size > 0 and _pad_length(size) > 0: padding = _pad_length(size) * self._padding_byte # TODO: Not that this is ever likely, but if for some odd # reason _padding_byte is > 0x80 this will fail; but really if # somebody's custom fits format is doing that, they're doing it # wrong and should be reprimanded harshly. fileobj.write(padding.encode("ascii")) size += len(padding) else: # The data has not been modified or does not need need to be # rescaled, so it can be copied, unmodified, directly from an # existing file or buffer size += self._writedata_direct_copy(fileobj) # flush, to make sure the content is written fileobj.flush() # return both the location and the size of the data area return offset, size def _writedata_internal(self, fileobj): """ The beginning and end of most _writedata() implementations are the same, but the details of writing the data array itself can vary between HDU types, so that should be implemented in this method. Should return the size in bytes of the data written. """ fileobj.writearray(self.data) return self.data.size * self.data.itemsize def _writedata_direct_copy(self, fileobj): """Copies the data directly from one file/buffer to the new file. For now this is handled by loading the raw data from the existing data (including any padding) via a memory map or from an already in-memory buffer and using Numpy's existing file-writing facilities to write to the new file. If this proves too slow a more direct approach may be used. """ raw = self._get_raw_data(self._data_size, "ubyte", self._data_offset) if raw is not None: fileobj.writearray(raw) return raw.nbytes else: return 0 # TODO: This is the start of moving HDU writing out of the _File class; # Though right now this is an internal private method (though still used by # HDUList, eventually the plan is to have this be moved into writeto() # somehow... 
def _writeto(self, fileobj, inplace=False, copy=False): try: dirname = os.path.dirname(fileobj._file.name) except (AttributeError, TypeError): dirname = None with _free_space_check(self, dirname): self._writeto_internal(fileobj, inplace, copy) def _writeto_internal(self, fileobj, inplace, copy): # For now fileobj is assumed to be a _File object if not inplace or self._new: header_offset, _ = self._writeheader(fileobj) data_offset, data_size = self._writedata(fileobj) # Set the various data location attributes on newly-written HDUs if self._new: self._header_offset = header_offset self._data_offset = data_offset self._data_size = data_size return hdrloc = self._header_offset hdrsize = self._data_offset - self._header_offset datloc = self._data_offset datsize = self._data_size if self._header._modified: # Seek to the original header location in the file self._file.seek(hdrloc) # This should update hdrloc with the header location in the new file hdrloc, hdrsize = self._writeheader(fileobj) # If the data is to be written below with self._writedata, that # will also properly update the data location; but it should be # updated here too datloc = hdrloc + hdrsize elif copy: # Seek to the original header location in the file self._file.seek(hdrloc) # Before writing, update the hdrloc with the current file position, # which is the hdrloc for the new file hdrloc = fileobj.tell() fileobj.write(self._file.read(hdrsize)) # The header size is unchanged, but the data location may be # different from before depending on if previous HDUs were resized datloc = fileobj.tell() if self._data_loaded: if self.data is not None: # Seek through the array's bases for a memmap'd array; we # can't rely on the _File object to give us this info since # the user may have replaced the previous mmap'd array if copy or self._data_replaced: # Of course, if we're copying the data to a new file # we don't care about flushing the original mmap; # instead just read it into the new file array_mmap = None else: array_mmap = _get_array_mmap(self.data) if array_mmap is not None: array_mmap.flush() else: self._file.seek(self._data_offset) datloc, datsize = self._writedata(fileobj) elif copy: datsize = self._writedata_direct_copy(fileobj) self._header_offset = hdrloc self._data_offset = datloc self._data_size = datsize self._data_replaced = False def _close(self, closed=True): # If the data was mmap'd, close the underlying mmap (this will # prevent any future access to the .data attribute if there are # no other references to it; if there are other references then # it is up to the user to clean those up) if closed and self._data_loaded and _get_array_mmap(self.data) is not None: del self.data # For backwards-compatibility, though nobody should have # been using this directly: _AllHDU = _BaseHDU # For convenience... # TODO: register_hdu could be made into a class decorator which would be pretty # cool, but only once 2.6 support is dropped. register_hdu = _BaseHDU.register_hdu unregister_hdu = _BaseHDU.unregister_hdu class _CorruptedHDU(_BaseHDU): """ A Corrupted HDU class. This class is used when one or more mandatory `Card`s are corrupted (unparsable), such as the ``BITPIX``, ``NAXIS``, or ``END`` cards. A corrupted HDU usually means that the data size cannot be calculated or the ``END`` card is not found. In the case of a missing ``END`` card, the `Header` may also contain the binary data ..
note:: In future, it may be possible to decipher where the last block of the `Header` ends, but this task may be difficult when the extension is a `TableHDU` containing ASCII data. """ @property def size(self): """ Returns the size (in bytes) of the HDU's data part. """ # Note: On compressed files this might report a negative size; but the # file is corrupt anyways so I'm not too worried about it. if self._buffer is not None: return len(self._buffer) - self._data_offset return self._file.size - self._data_offset def _summary(self): return (self.name, self.ver, "CorruptedHDU") def verify(self): pass class _NonstandardHDU(_BaseHDU, _Verify): """ A Non-standard HDU class. This class is used for a Primary HDU when the ``SIMPLE`` Card has a value of `False`. A non-standard HDU comes from a file that resembles a FITS file but departs from the standards in some significant way. One example would be files where the numbers are in the DEC VAX internal storage format rather than the standard FITS most significant byte first. The header for this HDU should be valid. The data for this HDU is read from the file as a byte stream that begins at the first byte after the header ``END`` card and continues until the end of the file. """ _standard = False @classmethod def match_header(cls, header): """ Matches any HDU that has the 'SIMPLE' keyword but is not a standard Primary or Groups HDU. """ # The SIMPLE keyword must be in the first card card = header.cards[0] # The check that 'GROUPS' is missing is a bit redundant, since the # match_header for GroupsHDU will always be called before this one. if card.keyword == "SIMPLE": if "GROUPS" not in header and card.value is False: return True else: raise InvalidHDUException else: return False @property def size(self): """ Returns the size (in bytes) of the HDU's data part. """ if self._buffer is not None: return len(self._buffer) - self._data_offset return self._file.size - self._data_offset def _writedata(self, fileobj): """ Differs from the base class :class:`_writedata` in that it doesn't automatically add padding, and treats the data as a string of raw bytes instead of an array. """ offset = 0 size = 0 fileobj.flush() try: offset = fileobj.tell() except OSError: offset = 0 if self.data is not None: fileobj.write(self.data) # flush, to make sure the content is written fileobj.flush() size = len(self.data) # return both the location and the size of the data area return offset, size def _summary(self): return (self.name, self.ver, "NonstandardHDU", len(self._header)) @lazyproperty def data(self): """ Return the file data. """ return self._get_raw_data(self.size, "ubyte", self._data_offset) def _verify(self, option="warn"): errs = _ErrList([], unit="Card") # verify each card for card in self._header.cards: errs.append(card._verify(option)) return errs class _ValidHDU(_BaseHDU, _Verify): """ Base class for all HDUs which are not corrupted. """ def __init__(self, data=None, header=None, name=None, ver=None, **kwargs): super().__init__(data=data, header=header) if header is not None and not isinstance(header, (Header, _BasicHeader)): # TODO: Instead maybe try initializing a new Header object from # whatever is passed in as the header--there are various types # of objects that could work for this... raise ValueError("header must be a Header object") # NOTE: private data members _checksum and _datasum are used by the # utility script "fitscheck" to detect missing checksums. 
self._checksum = None self._checksum_valid = None self._datasum = None self._datasum_valid = None if name is not None: self.name = name if ver is not None: self.ver = ver @classmethod def match_header(cls, header): """ Matches any HDU that is not recognized as having either the SIMPLE or XTENSION keyword in its header's first card, but is nonetheless not corrupted. TODO: Maybe it would make more sense to use _NonstandardHDU in this case? Not sure... """ return first(header.keys()) not in ("SIMPLE", "XTENSION") @property def size(self): """ Size (in bytes) of the data portion of the HDU. """ return self._header.data_size def filebytes(self): """ Calculates and returns the number of bytes that this HDU will write to a file. """ f = _File() # TODO: Fix this once new HDU writing API is settled on return self._writeheader(f)[1] + self._writedata(f)[1] def fileinfo(self): """ Returns a dictionary detailing information about the locations of this HDU within any associated file. The values are only valid after a read or write of the associated file with no intervening changes to the `HDUList`. Returns ------- dict or None The dictionary details information about the locations of this HDU within an associated file. Returns `None` when the HDU is not associated with a file. Dictionary contents: ========== ================================================ Key Value ========== ================================================ file File object associated with the HDU filemode Mode in which the file was opened (readonly, copyonwrite, update, append, ostream) hdrLoc Starting byte location of header in file datLoc Starting byte location of data block in file datSpan Data size including padding ========== ================================================ """ if hasattr(self, "_file") and self._file: return { "file": self._file, "filemode": self._file.mode, "hdrLoc": self._header_offset, "datLoc": self._data_offset, "datSpan": self._data_size, } else: return None def copy(self): """ Make a copy of the HDU, both header and data are copied. """ if self.data is not None: data = self.data.copy() else: data = None return self.__class__(data=data, header=self._header.copy()) def _verify(self, option="warn"): errs = _ErrList([], unit="Card") is_valid = BITPIX2DTYPE.__contains__ # Verify location and value of mandatory keywords. # Do the first card here, instead of in the respective HDU classes, so # the checking is in order, in case of required cards in wrong order. if isinstance(self, ExtensionHDU): firstkey = "XTENSION" firstval = self._extension else: firstkey = "SIMPLE" firstval = True self.req_cards(firstkey, 0, None, firstval, option, errs) self.req_cards( "BITPIX", 1, lambda v: (_is_int(v) and is_valid(v)), 8, option, errs ) self.req_cards( "NAXIS", 2, lambda v: (_is_int(v) and 0 <= v <= 999), 0, option, errs ) naxis = self._header.get("NAXIS", 0) if naxis < 1000: for ax in range(3, naxis + 3): key = "NAXIS" + str(ax - 2) self.req_cards( key, ax, lambda v: (_is_int(v) and v >= 0), _extract_number(self._header[key], default=1), option, errs, ) # Remove NAXISj cards where j is not in range 1, naxis inclusive. 
for keyword in self._header: if keyword.startswith("NAXIS") and len(keyword) > 5: try: number = int(keyword[5:]) if number <= 0 or number > naxis: raise ValueError except ValueError: err_text = ( "NAXISj keyword out of range ('{}' when " "NAXIS == {})".format(keyword, naxis) ) def fix(self=self, keyword=keyword): del self._header[keyword] errs.append( self.run_option( option=option, err_text=err_text, fix=fix, fix_text="Deleted.", ) ) # Verify that the EXTNAME keyword exists and is a string if "EXTNAME" in self._header: if not isinstance(self._header["EXTNAME"], str): err_text = "The EXTNAME keyword must have a string value." fix_text = "Converted the EXTNAME keyword to a string value." def fix(header=self._header): header["EXTNAME"] = str(header["EXTNAME"]) errs.append( self.run_option( option, err_text=err_text, fix_text=fix_text, fix=fix ) ) # verify each card for card in self._header.cards: errs.append(card._verify(option)) return errs # TODO: Improve this API a little bit--for one, most of these arguments # could be optional def req_cards(self, keyword, pos, test, fix_value, option, errlist): """ Check the existence, location, and value of a required `Card`. Parameters ---------- keyword : str The keyword to validate pos : int, callable If an ``int``, this specifies the exact location this card should have in the header. Remember that Python is zero-indexed, so this means ``pos=0`` requires the card to be the first card in the header. If given a callable, it should take one argument--the actual position of the keyword--and return `True` or `False`. This can be used for custom evaluation. For example if ``pos=lambda idx: idx > 10`` this will check that the keyword's index is greater than 10. test : callable This should be a callable (generally a function) that is passed the value of the given keyword and returns `True` or `False`. This can be used to validate the value associated with the given keyword. fix_value : str, int, float, complex, bool, None A valid value for a FITS keyword to to use if the given ``test`` fails to replace an invalid value. In other words, this provides a default value to use as a replacement if the keyword's current value is invalid. If `None`, there is no replacement value and the keyword is unfixable. option : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. errlist : list A list of validation errors already found in the FITS file; this is used primarily for the validation system to collect errors across multiple HDUs and multiple calls to `req_cards`. Notes ----- If ``pos=None``, the card can be anywhere in the header. If the card does not exist, the new card will have the ``fix_value`` as its value when created. Also check the card's value by using the ``test`` argument. """ errs = errlist fix = None try: index = self._header.index(keyword) except ValueError: index = None fixable = fix_value is not None insert_pos = len(self._header) + 1 # If pos is an int, insert at the given position (and convert it to a # lambda) if _is_int(pos): insert_pos = pos pos = lambda x: x == insert_pos # if the card does not exist if index is None: err_text = f"'{keyword}' card does not exist." fix_text = f"Fixed by inserting a new '{keyword}' card." 
if fixable: # use repr to accommodate both string and non-string types # Boolean is also OK in this constructor card = (keyword, fix_value) def fix(self=self, insert_pos=insert_pos, card=card): self._header.insert(insert_pos, card) errs.append( self.run_option( option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable, ) ) else: # if the supposed location is specified if pos is not None: if not pos(index): err_text = f"'{keyword}' card at the wrong place (card {index})." fix_text = ( f"Fixed by moving it to the right place (card {insert_pos})." ) def fix(self=self, index=index, insert_pos=insert_pos): card = self._header.cards[index] del self._header[index] self._header.insert(insert_pos, card) errs.append( self.run_option( option, err_text=err_text, fix_text=fix_text, fix=fix ) ) # if value checking is specified if test: val = self._header[keyword] if not test(val): err_text = f"'{keyword}' card has invalid value '{val}'." fix_text = f"Fixed by setting a new value '{fix_value}'." if fixable: def fix(self=self, keyword=keyword, val=fix_value): self._header[keyword] = fix_value errs.append( self.run_option( option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable, ) ) return errs def add_datasum(self, when=None, datasum_keyword="DATASUM"): """ Add the ``DATASUM`` card to this HDU with the value set to the checksum calculated for the data. Parameters ---------- when : str, optional Comment string for the card that by default represents the time when the checksum was calculated datasum_keyword : str, optional The name of the header keyword to store the datasum value in; this is typically 'DATASUM' per convention, but there exist use cases in which a different keyword should be used Returns ------- checksum : int The calculated datasum Notes ----- For testing purposes, provide a ``when`` argument to enable the comment value in the card to remain consistent. This will enable the generation of a ``CHECKSUM`` card with a consistent value. """ cs = self._calculate_datasum() if when is None: when = f"data unit checksum updated {self._get_timestamp()}" self._header[datasum_keyword] = (str(cs), when) return cs def add_checksum( self, when=None, override_datasum=False, checksum_keyword="CHECKSUM", datasum_keyword="DATASUM", ): """ Add the ``CHECKSUM`` and ``DATASUM`` cards to this HDU with the values set to the checksum calculated for the HDU and the data respectively. The addition of the ``DATASUM`` card may be overridden. Parameters ---------- when : str, optional comment string for the cards; by default the comments will represent the time when the checksum was calculated override_datasum : bool, optional add the ``CHECKSUM`` card only checksum_keyword : str, optional The name of the header keyword to store the checksum value in; this is typically 'CHECKSUM' per convention, but there exist use cases in which a different keyword should be used datasum_keyword : str, optional See ``checksum_keyword`` Notes ----- For testing purposes, first call `add_datasum` with a ``when`` argument, then call `add_checksum` with a ``when`` argument and ``override_datasum`` set to `True`. This will provide consistent comments for both cards and enable the generation of a ``CHECKSUM`` card with a consistent value. """ if not override_datasum: # Calculate and add the data checksum to the header. 
data_cs = self.add_datasum(when, datasum_keyword=datasum_keyword) else: # Just calculate the data checksum data_cs = self._calculate_datasum() if when is None: when = f"HDU checksum updated {self._get_timestamp()}" # Add the CHECKSUM card to the header with a value of all zeros. if datasum_keyword in self._header: self._header.set(checksum_keyword, "0" * 16, when, before=datasum_keyword) else: self._header.set(checksum_keyword, "0" * 16, when) csum = self._calculate_checksum(data_cs, checksum_keyword=checksum_keyword) self._header[checksum_keyword] = csum def verify_datasum(self): """ Verify that the value in the ``DATASUM`` keyword matches the value calculated for the ``DATASUM`` of the current HDU data. Returns ------- valid : int - 0 - failure - 1 - success - 2 - no ``DATASUM`` keyword present """ if "DATASUM" in self._header: datasum = self._calculate_datasum() if datasum == int(self._header["DATASUM"]): return 1 else: # Failed return 0 else: return 2 def verify_checksum(self): """ Verify that the value in the ``CHECKSUM`` keyword matches the value calculated for the current HDU CHECKSUM. Returns ------- valid : int - 0 - failure - 1 - success - 2 - no ``CHECKSUM`` keyword present """ if "CHECKSUM" in self._header: if "DATASUM" in self._header: datasum = self._calculate_datasum() else: datasum = 0 checksum = self._calculate_checksum(datasum) if checksum == self._header["CHECKSUM"]: return 1 else: # Failed return 0 else: return 2 def _verify_checksum_datasum(self): """ Verify the checksum/datasum values if the cards exist in the header. Simply displays warnings if either the checksum or datasum don't match. """ if "CHECKSUM" in self._header: self._checksum = self._header["CHECKSUM"] self._checksum_valid = self.verify_checksum() if not self._checksum_valid: warnings.warn( f"Checksum verification failed for HDU {self.name, self.ver}.\n", AstropyUserWarning, ) if "DATASUM" in self._header: self._datasum = self._header["DATASUM"] self._datasum_valid = self.verify_datasum() if not self._datasum_valid: warnings.warn( f"Datasum verification failed for HDU {self.name, self.ver}.\n", AstropyUserWarning, ) def _get_timestamp(self): """ Return the current timestamp in ISO 8601 format, with microseconds stripped off. Ex.: 2007-05-30T19:05:11 """ return datetime.datetime.now().isoformat()[:19] def _calculate_datasum(self): """ Calculate the value for the ``DATASUM`` card in the HDU. """ if not self._data_loaded: # This is the case where the data has not been read from the file # yet. We find the data in the file, read it, and calculate the # datasum. if self.size > 0: raw_data = self._get_raw_data( self._data_size, "ubyte", self._data_offset ) return self._compute_checksum(raw_data) else: return 0 elif self.data is not None: return self._compute_checksum(self.data.view("ubyte")) else: return 0 def _calculate_checksum(self, datasum, checksum_keyword="CHECKSUM"): """ Calculate the value of the ``CHECKSUM`` card in the HDU. """ old_checksum = self._header[checksum_keyword] self._header[checksum_keyword] = "0" * 16 # Convert the header to bytes. s = self._header.tostring().encode("utf8") # Calculate the checksum of the Header and data. cs = self._compute_checksum(np.frombuffer(s, dtype="ubyte"), datasum) # Encode the checksum into a string. s = self._char_encode(~cs) # Return the header card value. self._header[checksum_keyword] = old_checksum return s def _compute_checksum(self, data, sum32=0): """ Compute the ones-complement checksum of a sequence of bytes. 
Parameters ---------- data a memory region to checksum sum32 incremental checksum value from another region Returns ------- ones complement checksum """ blocklen = 2880 sum32 = np.uint32(sum32) for i in range(0, len(data), blocklen): length = min(blocklen, len(data) - i) # ???? sum32 = self._compute_hdu_checksum(data[i : i + length], sum32) return sum32 def _compute_hdu_checksum(self, data, sum32=0): """ Translated from FITS Checksum Proposal by Seaman, Pence, and Rots. Use uint32 literals as a hedge against type promotion to int64. This code should only be called with blocks of 2880 bytes Longer blocks result in non-standard checksums with carry overflow Historically, this code *was* called with larger blocks and for that reason still needs to be for backward compatibility. """ u8 = np.uint32(8) u16 = np.uint32(16) uFFFF = np.uint32(0xFFFF) if data.nbytes % 2: last = data[-1] data = data[:-1] else: last = np.uint32(0) data = data.view(">u2") hi = sum32 >> u16 lo = sum32 & uFFFF hi += np.add.reduce(data[0::2], dtype=np.uint64) lo += np.add.reduce(data[1::2], dtype=np.uint64) if (data.nbytes // 2) % 2: lo += last << u8 else: hi += last << u8 hicarry = hi >> u16 locarry = lo >> u16 while hicarry or locarry: hi = (hi & uFFFF) + locarry lo = (lo & uFFFF) + hicarry hicarry = hi >> u16 locarry = lo >> u16 return (hi << u16) + lo # _MASK and _EXCLUDE used for encoding the checksum value into a character # string. _MASK = [0xFF000000, 0x00FF0000, 0x0000FF00, 0x000000FF] _EXCLUDE = [ 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F, 0x60, ] def _encode_byte(self, byte): """ Encode a single byte. """ quotient = byte // 4 + ord("0") remainder = byte % 4 ch = np.array( [(quotient + remainder), quotient, quotient, quotient], dtype="int32" ) check = True while check: check = False for x in self._EXCLUDE: for j in [0, 2]: if ch[j] == x or ch[j + 1] == x: ch[j] += 1 ch[j + 1] -= 1 check = True return ch def _char_encode(self, value): """ Encodes the checksum ``value`` using the algorithm described in SPR section A.7.2 and returns it as a 16 character string. Parameters ---------- value a checksum Returns ------- ascii encoded checksum """ value = np.uint32(value) asc = np.zeros((16,), dtype="byte") ascii = np.zeros((16,), dtype="byte") for i in range(4): byte = (value & self._MASK[i]) >> ((3 - i) * 8) ch = self._encode_byte(byte) for j in range(4): asc[4 * j + i] = ch[j] for i in range(16): ascii[i] = asc[(i + 15) % 16] return decode_ascii(ascii.tobytes()) class ExtensionHDU(_ValidHDU): """ An extension HDU class. This class is the base class for the `TableHDU`, `ImageHDU`, and `BinTableHDU` classes. """ _extension = "" @classmethod def match_header(cls, header): """ This class should never be instantiated directly. Either a standard extension HDU type should be used for a specific extension, or NonstandardExtHDU should be used. """ raise NotImplementedError def writeto(self, name, output_verify="exception", overwrite=False, checksum=False): """ Works similarly to the normal writeto(), but prepends a default `PrimaryHDU`, as required by extension HDUs (which cannot stand on their own). """ from .hdulist import HDUList from .image import PrimaryHDU hdulist = HDUList([PrimaryHDU(), self]) hdulist.writeto(name, output_verify, overwrite=overwrite, checksum=checksum) def _verify(self, option="warn"): errs = super()._verify(option=option) # Verify location and value of mandatory keywords.
naxis = self._header.get("NAXIS", 0) self.req_cards( "PCOUNT", naxis + 3, lambda v: (_is_int(v) and v >= 0), 0, option, errs ) self.req_cards( "GCOUNT", naxis + 4, lambda v: (_is_int(v) and v == 1), 1, option, errs ) return errs # For backwards compatibility, though this needs to be deprecated # TODO: Mark this as deprecated _ExtensionHDU = ExtensionHDU class NonstandardExtHDU(ExtensionHDU): """ A Non-standard Extension HDU class. This class is used for an Extension HDU when the ``XTENSION`` `Card` has a non-standard value. In this case, Astropy can figure out how big the data is but not what it is. The data for this HDU is read from the file as a byte stream that begins at the first byte after the header ``END`` card and continues until the beginning of the next header or the end of the file. """ _standard = False @classmethod def match_header(cls, header): """ Matches any extension HDU that is not one of the standard extension HDU types. """ card = header.cards[0] xtension = card.value if isinstance(xtension, str): xtension = xtension.rstrip() # A3DTABLE is not really considered a 'standard' extension, as it was # sort of the prototype for BINTABLE; however, since our BINTABLE # implementation handles A3DTABLE HDUs it is listed here. standard_xtensions = ("IMAGE", "TABLE", "BINTABLE", "A3DTABLE") # The check that xtension is not one of the standard types should be # redundant. return card.keyword == "XTENSION" and xtension not in standard_xtensions def _summary(self): axes = tuple(self.data.shape) return (self.name, self.ver, "NonstandardExtHDU", len(self._header), axes) @lazyproperty def data(self): """ Return the file data. """ return self._get_raw_data(self.size, "ubyte", self._data_offset) # TODO: Mark this as deprecated _NonstandardExtHDU = NonstandardExtHDU
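# Illustrative sketch (not called anywhere in this module; the function name is
# hypothetical): how the checksum helpers defined on _ValidHDU above are
# typically exercised from user code.  The import is done lazily inside the
# function to avoid a circular import at module load time.
def _example_checksum_roundtrip():
    from astropy.io import fits

    hdu = fits.PrimaryHDU(data=np.arange(100, dtype=np.int16))
    hdu.add_checksum()  # writes both the DATASUM and CHECKSUM cards
    # verify_checksum()/verify_datasum() return 1 on success, 0 on failure,
    # and 2 when the corresponding card is absent.
    return hdu.verify_checksum(), hdu.verify_datasum()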
5e50c363000cf87d6bbfeb94a9019cd7bb140c24bc36515c4c058b1ea7b1c701
# Licensed under a 3-clause BSD style license - see PYFITS.rst import gzip import itertools import os import re import shutil import sys import warnings import numpy as np from astropy.io.fits.file import FILE_MODES, _File from astropy.io.fits.header import _pad_length from astropy.io.fits.util import ( _free_space_check, _get_array_mmap, _is_int, _tmp_name, fileobj_closed, fileobj_mode, ignore_sigint, isfile, ) from astropy.io.fits.verify import VerifyError, VerifyWarning, _ErrList, _Verify from astropy.utils import indent # NOTE: Python can be built without bz2. from astropy.utils.compat.optional_deps import HAS_BZ2 from astropy.utils.exceptions import AstropyUserWarning from . import compressed from .base import ExtensionHDU, _BaseHDU, _NonstandardHDU, _ValidHDU from .groups import GroupsHDU from .image import ImageHDU, PrimaryHDU if HAS_BZ2: import bz2 __all__ = ["HDUList", "fitsopen"] # FITS file signature as per RFC 4047 FITS_SIGNATURE = b"SIMPLE = T" def fitsopen( name, mode="readonly", memmap=None, save_backup=False, cache=True, lazy_load_hdus=None, ignore_missing_simple=False, *, use_fsspec=None, fsspec_kwargs=None, **kwargs, ): """Factory function to open a FITS file and return an `HDUList` object. Parameters ---------- name : str, file-like or `pathlib.Path` File to be opened. mode : str, optional Open mode, 'readonly', 'update', 'append', 'denywrite', or 'ostream'. Default is 'readonly'. If ``name`` is a file object that is already opened, ``mode`` must match the mode the file was opened with, readonly (rb), update (rb+), append (ab+), ostream (w), denywrite (rb)). memmap : bool, optional Is memory mapping to be used? This value is obtained from the configuration item ``astropy.io.fits.Conf.use_memmap``. Default is `True`. save_backup : bool, optional If the file was opened in update or append mode, this ensures that a backup of the original file is saved before any changes are flushed. The backup has the same name as the original file with ".bak" appended. If "file.bak" already exists then "file.bak.1" is used, and so on. Default is `False`. cache : bool, optional If the file name is a URL, `~astropy.utils.data.download_file` is used to open the file. This specifies whether or not to save the file locally in Astropy's download cache. Default is `True`. lazy_load_hdus : bool, optional To avoid reading all the HDUs and headers in a FITS file immediately upon opening. This is an optimization especially useful for large files, as FITS has no way of determining the number and offsets of all the HDUs in a file without scanning through the file and reading all the headers. Default is `True`. To disable lazy loading and read all HDUs immediately (the old behavior) use ``lazy_load_hdus=False``. This can lead to fewer surprises--for example with lazy loading enabled, ``len(hdul)`` can be slow, as it means the entire FITS file needs to be read in order to determine the number of HDUs. ``lazy_load_hdus=False`` ensures that all HDUs have already been loaded after the file has been opened. .. versionadded:: 1.3 uint : bool, optional Interpret signed integer data where ``BZERO`` is the central value and ``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as ``uint16`` data. Default is `True` so that the pseudo-unsigned integer convention is assumed. ignore_missing_end : bool, optional Do not raise an exception when opening a file that is missing an ``END`` card in the last header. Default is `False`. 
ignore_missing_simple : bool, optional Do not raise an exception when the SIMPLE keyword is missing. Note that io.fits will raise a warning if a SIMPLE card is present but written in a way that does not follow the FITS Standard. Default is `False`. .. versionadded:: 4.2 checksum : bool, str, optional If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values (when present in the HDU header) match the header and data of all HDU's in the file. Updates to a file that already has a checksum will preserve and update the existing checksums unless this argument is given a value of 'remove', in which case the CHECKSUM and DATASUM values are not checked, and are removed when saving changes to the file. Default is `False`. disable_image_compression : bool, optional If `True`, treats compressed image HDU's like normal binary table HDU's. Default is `False`. do_not_scale_image_data : bool, optional If `True`, image data is not scaled using BSCALE/BZERO values when read. Default is `False`. character_as_bytes : bool, optional Whether to return bytes for string columns, otherwise unicode strings are returned, but this does not respect memory mapping and loads the whole column in memory when accessed. Default is `False`. ignore_blank : bool, optional If `True`, the BLANK keyword is ignored if present. Default is `False`. scale_back : bool, optional If `True`, when saving changes to a file that contained scaled image data, restore the data to the original type and reapply the original BSCALE/BZERO values. This could lead to loss of accuracy if scaling back to integer values after performing floating point operations on the data. Default is `False`. output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. use_fsspec : bool, optional Use `fsspec.open` to open the file? Defaults to `False` unless ``name`` starts with the Amazon S3 storage prefix ``s3://`` or the Google Cloud Storage prefix ``gs://``. Can also be used for paths with other prefixes (e.g., ``http://``) but in this case you must explicitly pass ``use_fsspec=True``. Use of this feature requires the optional ``fsspec`` package. A ``ModuleNotFoundError`` will be raised if the dependency is missing. .. versionadded:: 5.2 fsspec_kwargs : dict, optional Keyword arguments passed on to `fsspec.open`. This can be used to configure cloud storage credentials and caching behavior. For example, pass ``fsspec_kwargs={"anon": True}`` to enable anonymous access to Amazon S3 open data buckets. See ``fsspec``'s documentation for available parameters. .. versionadded:: 5.2 Returns ------- hdulist : `HDUList` `HDUList` containing all of the header data units in the file. 
""" from astropy.io.fits import conf if memmap is None: # distinguish between True (kwarg explicitly set) # and None (preference for memmap in config, might be ignored) memmap = None if conf.use_memmap else False else: memmap = bool(memmap) if lazy_load_hdus is None: lazy_load_hdus = conf.lazy_load_hdus else: lazy_load_hdus = bool(lazy_load_hdus) if "uint" not in kwargs: kwargs["uint"] = conf.enable_uint if not name: raise ValueError(f"Empty filename: {name!r}") return HDUList.fromfile( name, mode, memmap, save_backup, cache, lazy_load_hdus, ignore_missing_simple, use_fsspec=use_fsspec, fsspec_kwargs=fsspec_kwargs, **kwargs, ) class HDUList(list, _Verify): """ HDU list class. This is the top-level FITS object. When a FITS file is opened, a `HDUList` object is returned. """ def __init__(self, hdus=[], file=None): """ Construct a `HDUList` object. Parameters ---------- hdus : BaseHDU or sequence thereof, optional The HDU object(s) to comprise the `HDUList`. Should be instances of HDU classes like `ImageHDU` or `BinTableHDU`. file : file-like, bytes, optional The opened physical file associated with the `HDUList` or a bytes object containing the contents of the FITS file. """ if isinstance(file, bytes): self._data = file self._file = None else: self._file = file self._data = None # For internal use only--the keyword args passed to fitsopen / # HDUList.fromfile/string when opening the file self._open_kwargs = {} self._in_read_next_hdu = False # If we have read all the HDUs from the file or not # The assumes that all HDUs have been written when we first opened the # file; we do not currently support loading additional HDUs from a file # while it is being streamed to. In the future that might be supported # but for now this is only used for the purpose of lazy-loading of # existing HDUs. if file is None: self._read_all = True elif self._file is not None: # Should never attempt to read HDUs in ostream mode self._read_all = self._file.mode == "ostream" else: self._read_all = False if hdus is None: hdus = [] # can take one HDU, as well as a list of HDU's as input if isinstance(hdus, _ValidHDU): hdus = [hdus] elif not isinstance(hdus, (HDUList, list)): raise TypeError("Invalid input for HDUList.") for idx, hdu in enumerate(hdus): if not isinstance(hdu, _BaseHDU): raise TypeError(f"Element {idx} in the HDUList input is not an HDU.") super().__init__(hdus) if file is None: # Only do this when initializing from an existing list of HDUs # When initializing from a file, this will be handled by the # append method after the first HDU is read self.update_extend() def __len__(self): if not self._in_read_next_hdu: self.readall() return super().__len__() def __repr__(self): # Special case: if the FITS file is located on a remote file system # and has not been fully read yet, we return a simplified repr to # avoid downloading the entire file. We can tell that a file is remote # from the fact that the ``fsspec`` package was used to open it. 
is_fsspec_file = self._file and "fsspec" in str( self._file._file.__class__.__bases__ ) if not self._read_all and is_fsspec_file: return f"{type(self)} (partially read)" # In order to correctly repr an HDUList we need to load all the # HDUs as well self.readall() return super().__repr__() def __iter__(self): # While effectively this does the same as: # for idx in range(len(self)): # yield self[idx] # the more complicated structure is here to prevent the use of len(), # which would break the lazy loading for idx in itertools.count(): try: yield self[idx] except IndexError: break def __getitem__(self, key): """ Get an HDU from the `HDUList`, indexed by number or name. """ # If the key is a slice we need to make sure the necessary HDUs # have been loaded before passing the slice on to super. if isinstance(key, slice): max_idx = key.stop # Check for and handle the case when no maximum was # specified (e.g. [1:]). if max_idx is None: # We need all of the HDUs, so load them # and reset the maximum to the actual length. max_idx = len(self) # Just in case the max_idx is negative... max_idx = self._positive_index_of(max_idx) number_loaded = super().__len__() if max_idx >= number_loaded: # We need more than we have, try loading up to and including # max_idx. Note we do not try to be clever about skipping HDUs # even though key.step might conceivably allow it. for i in range(number_loaded, max_idx): # Read until max_idx or to the end of the file, whichever # comes first. if not self._read_next_hdu(): break try: hdus = super().__getitem__(key) except IndexError as e: # Raise a more helpful IndexError if the file was not fully read. if self._read_all: raise e else: raise IndexError( "HDU not found, possibly because the index " "is out of range, or because the file was " "closed before all HDUs were read" ) else: return HDUList(hdus) # Originally this used recursion, but hypothetically an HDU with # a very large number of HDUs could blow the stack, so use a loop # instead try: return self._try_while_unread_hdus( super().__getitem__, self._positive_index_of(key) ) except IndexError as e: # Raise a more helpful IndexError if the file was not fully read. if self._read_all: raise e else: raise IndexError( "HDU not found, possibly because the index " "is out of range, or because the file was " "closed before all HDUs were read" ) def __contains__(self, item): """ Returns `True` if ``item`` is an ``HDU`` _in_ ``self`` or a valid extension specification (e.g., integer extension number, extension name, or a tuple of extension name and an extension version) of a ``HDU`` in ``self``. """ try: self._try_while_unread_hdus(self.index_of, item) except (KeyError, ValueError): return False return True def __setitem__(self, key, hdu): """ Set an HDU to the `HDUList`, indexed by number or name. """ _key = self._positive_index_of(key) if isinstance(hdu, (slice, list)): if _is_int(_key): raise ValueError("An element in the HDUList must be an HDU.") for item in hdu: if not isinstance(item, _BaseHDU): raise ValueError(f"{item} is not an HDU.") else: if not isinstance(hdu, _BaseHDU): raise ValueError(f"{hdu} is not an HDU.") try: self._try_while_unread_hdus(super().__setitem__, _key, hdu) except IndexError: raise IndexError(f"Extension {key} is out of bound or not found.") self._resize = True self._truncate = False def __delitem__(self, key): """ Delete an HDU from the `HDUList`, indexed by number or name. 
""" if isinstance(key, slice): end_index = len(self) else: key = self._positive_index_of(key) end_index = len(self) - 1 self._try_while_unread_hdus(super().__delitem__, key) if key == end_index or key == -1 and not self._resize: self._truncate = True else: self._truncate = False self._resize = True # Support the 'with' statement def __enter__(self): return self def __exit__(self, type, value, traceback): output_verify = self._open_kwargs.get("output_verify", "exception") self.close(output_verify=output_verify) @classmethod def fromfile( cls, fileobj, mode=None, memmap=None, save_backup=False, cache=True, lazy_load_hdus=True, ignore_missing_simple=False, **kwargs, ): """ Creates an `HDUList` instance from a file-like object. The actual implementation of ``fitsopen()``, and generally shouldn't be used directly. Use :func:`open` instead (and see its documentation for details of the parameters accepted by this method). """ return cls._readfrom( fileobj=fileobj, mode=mode, memmap=memmap, save_backup=save_backup, cache=cache, ignore_missing_simple=ignore_missing_simple, lazy_load_hdus=lazy_load_hdus, **kwargs, ) @classmethod def fromstring(cls, data, **kwargs): """ Creates an `HDUList` instance from a string or other in-memory data buffer containing an entire FITS file. Similar to :meth:`HDUList.fromfile`, but does not accept the mode or memmap arguments, as they are only relevant to reading from a file on disk. This is useful for interfacing with other libraries such as CFITSIO, and may also be useful for streaming applications. Parameters ---------- data : str, buffer-like, etc. A string or other memory buffer containing an entire FITS file. Buffer-like objects include :class:`~bytes`, :class:`~bytearray`, :class:`~memoryview`, and :class:`~numpy.ndarray`. It should be noted that if that memory is read-only (such as a Python string) the returned :class:`HDUList`'s data portions will also be read-only. **kwargs : dict Optional keyword arguments. See :func:`astropy.io.fits.open` for details. Returns ------- hdul : HDUList An :class:`HDUList` object representing the in-memory FITS file. """ try: # Test that the given object supports the buffer interface by # ensuring an ndarray can be created from it np.ndarray((), dtype="ubyte", buffer=data) except TypeError: raise TypeError( "The provided object {} does not contain an underlying " "memory buffer. fromstring() requires an object that " "supports the buffer interface such as bytes, buffer, " "memoryview, ndarray, etc. This restriction is to ensure " "that efficient access to the array/table data is possible." "".format(data) ) return cls._readfrom(data=data, **kwargs) def fileinfo(self, index): """ Returns a dictionary detailing information about the locations of the indexed HDU within any associated file. The values are only valid after a read or write of the associated file with no intervening changes to the `HDUList`. Parameters ---------- index : int Index of HDU for which info is to be returned. Returns ------- fileinfo : dict or None The dictionary details information about the locations of the indexed HDU within an associated file. Returns `None` when the HDU is not associated with a file. 
Dictionary contents: ========== ======================================================== Key Value ========== ======================================================== file File object associated with the HDU filename Name of associated file object filemode Mode in which the file was opened (readonly, update, append, denywrite, ostream) resized Flag that when `True` indicates that the data has been resized since the last read/write so the returned values may not be valid. hdrLoc Starting byte location of header in file datLoc Starting byte location of data block in file datSpan Data size including padding ========== ======================================================== """ if self._file is not None: output = self[index].fileinfo() if not output: # OK, the HDU associated with this index is not yet # tied to the file associated with the HDUList. The only way # to get the file object is to check each of the HDU's in the # list until we find the one associated with the file. f = None for hdu in self: info = hdu.fileinfo() if info: f = info["file"] fm = info["filemode"] break output = { "file": f, "filemode": fm, "hdrLoc": None, "datLoc": None, "datSpan": None, } output["filename"] = self._file.name output["resized"] = self._wasresized() else: output = None return output def __copy__(self): """ Return a shallow copy of an HDUList. Returns ------- copy : `HDUList` A shallow copy of this `HDUList` object. """ return self[:] # Syntactic sugar for `__copy__()` magic method copy = __copy__ def __deepcopy__(self, memo=None): return HDUList([hdu.copy() for hdu in self]) def pop(self, index=-1): """Remove an item from the list and return it. Parameters ---------- index : int, str, tuple of (string, int), optional An integer value of ``index`` indicates the position from which ``pop()`` removes and returns an HDU. A string value or a tuple of ``(string, int)`` functions as a key for identifying the HDU to be removed and returned. If ``key`` is a tuple, it is of the form ``(key, ver)`` where ``ver`` is an ``EXTVER`` value that must match the HDU being searched for. If the key is ambiguous (e.g. there are multiple 'SCI' extensions) the first match is returned. For a more precise match use the ``(name, ver)`` pair. If even the ``(name, ver)`` pair is ambiguous the numeric index must be used to index the duplicate HDU. Returns ------- hdu : BaseHDU The HDU object at position indicated by ``index`` or having name and version specified by ``index``. """ # Make sure that HDUs are loaded before attempting to pop self.readall() list_index = self.index_of(index) return super().pop(list_index) def insert(self, index, hdu): """ Insert an HDU into the `HDUList` at the given ``index``. Parameters ---------- index : int Index before which to insert the new HDU. hdu : BaseHDU The HDU object to insert """ if not isinstance(hdu, _BaseHDU): raise ValueError(f"{hdu} is not an HDU.") num_hdus = len(self) if index == 0 or num_hdus == 0: if num_hdus != 0: # We are inserting a new Primary HDU so we need to # make the current Primary HDU into an extension HDU. if isinstance(self[0], GroupsHDU): raise ValueError( "The current Primary HDU is a GroupsHDU. " "It can't be made into an extension HDU, " "so another HDU cannot be inserted before it." ) hdu1 = ImageHDU(self[0].data, self[0].header) # Insert it into position 1, then delete HDU at position 0. super().insert(1, hdu1) super().__delitem__(0) if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)): # You passed in an Extension HDU but we need a Primary HDU. 
# If you provided an ImageHDU then we can convert it to # a primary HDU and use that. if isinstance(hdu, ImageHDU): hdu = PrimaryHDU(hdu.data, hdu.header) else: # You didn't provide an ImageHDU so we create a # simple Primary HDU and append that first before # we append the new Extension HDU. phdu = PrimaryHDU() super().insert(0, phdu) index = 1 else: if isinstance(hdu, GroupsHDU): raise ValueError("A GroupsHDU must be inserted as a Primary HDU.") if isinstance(hdu, PrimaryHDU): # You passed a Primary HDU but we need an Extension HDU # so create an Extension HDU from the input Primary HDU. hdu = ImageHDU(hdu.data, hdu.header) super().insert(index, hdu) hdu._new = True self._resize = True self._truncate = False # make sure the EXTEND keyword is in primary HDU if there is extension self.update_extend() def append(self, hdu): """ Append a new HDU to the `HDUList`. Parameters ---------- hdu : BaseHDU HDU to add to the `HDUList`. """ if not isinstance(hdu, _BaseHDU): raise ValueError("HDUList can only append an HDU.") if len(self) > 0: if isinstance(hdu, GroupsHDU): raise ValueError("Can't append a GroupsHDU to a non-empty HDUList") if isinstance(hdu, PrimaryHDU): # You passed a Primary HDU but we need an Extension HDU # so create an Extension HDU from the input Primary HDU. # TODO: This isn't necessarily sufficient to copy the HDU; # _header_offset and friends need to be copied too. hdu = ImageHDU(hdu.data, hdu.header) else: if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)): # You passed in an Extension HDU but we need a Primary # HDU. # If you provided an ImageHDU then we can convert it to # a primary HDU and use that. if isinstance(hdu, ImageHDU): hdu = PrimaryHDU(hdu.data, hdu.header) else: # You didn't provide an ImageHDU so we create a # simple Primary HDU and append that first before # we append the new Extension HDU. phdu = PrimaryHDU() super().append(phdu) super().append(hdu) hdu._new = True self._resize = True self._truncate = False # make sure the EXTEND keyword is in primary HDU if there is extension self.update_extend() def index_of(self, key): """ Get the index of an HDU from the `HDUList`. Parameters ---------- key : int, str, tuple of (string, int) or BaseHDU The key identifying the HDU. If ``key`` is a tuple, it is of the form ``(name, ver)`` where ``ver`` is an ``EXTVER`` value that must match the HDU being searched for. If the key is ambiguous (e.g. there are multiple 'SCI' extensions) the first match is returned. For a more precise match use the ``(name, ver)`` pair. If even the ``(name, ver)`` pair is ambiguous (it shouldn't be but it's not impossible) the numeric index must be used to index the duplicate HDU. When ``key`` is an HDU object, this function returns the index of that HDU object in the ``HDUList``. Returns ------- index : int The index of the HDU in the `HDUList`. Raises ------ ValueError If ``key`` is an HDU object and it is not found in the ``HDUList``. KeyError If an HDU specified by the ``key`` that is an extension number, extension name, or a tuple of extension name and version is not found in the ``HDUList``. 
""" if _is_int(key): return key elif isinstance(key, tuple): _key, _ver = key elif isinstance(key, _BaseHDU): return self.index(key) else: _key = key _ver = None if not isinstance(_key, str): raise KeyError( "{} indices must be integers, extension names as strings, " "or (extname, version) tuples; got {}" "".format(self.__class__.__name__, _key) ) _key = (_key.strip()).upper() found = None for idx, hdu in enumerate(self): name = hdu.name if isinstance(name, str): name = name.strip().upper() # 'PRIMARY' should always work as a reference to the first HDU if (name == _key or (_key == "PRIMARY" and idx == 0)) and ( _ver is None or _ver == hdu.ver ): found = idx break if found is None: raise KeyError(f"Extension {key!r} not found.") else: return found def _positive_index_of(self, key): """ Same as index_of, but ensures always returning a positive index or zero. (Really this should be called non_negative_index_of but it felt too long.) This means that if the key is a negative integer, we have to convert it to the corresponding positive index. This means knowing the length of the HDUList, which in turn means loading all HDUs. Therefore using negative indices on HDULists is inherently inefficient. """ index = self.index_of(key) if index >= 0: return index if abs(index) > len(self): raise IndexError(f"Extension {index} is out of bound or not found.") return len(self) + index def readall(self): """ Read data of all HDUs into memory. """ while self._read_next_hdu(): pass @ignore_sigint def flush(self, output_verify="fix", verbose=False): """ Force a write of the `HDUList` back to the file (for append and update modes only). Parameters ---------- output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. verbose : bool When `True`, print verbose messages """ if self._file.mode not in ("append", "update", "ostream"): warnings.warn( f"Flush for '{self._file.mode}' mode is not supported.", AstropyUserWarning, ) return save_backup = self._open_kwargs.get("save_backup", False) if save_backup and self._file.mode in ("append", "update"): filename = self._file.name if os.path.exists(filename): # The the file doesn't actually exist anymore for some reason # then there's no point in trying to make a backup backup = filename + ".bak" idx = 1 while os.path.exists(backup): backup = filename + ".bak." + str(idx) idx += 1 warnings.warn( f"Saving a backup of {filename} to {backup}.", AstropyUserWarning, ) try: shutil.copy(filename, backup) except OSError as exc: raise OSError( f"Failed to save backup to destination {filename}" ) from exc self.verify(option=output_verify) if self._file.mode in ("append", "ostream"): for hdu in self: if verbose: try: extver = str(hdu._header["extver"]) except KeyError: extver = "" # only append HDU's which are "new" if hdu._new: hdu._prewriteto(checksum=hdu._output_checksum) with _free_space_check(self): hdu._writeto(self._file) if verbose: print("append HDU", hdu.name, extver) hdu._new = False hdu._postwriteto() elif self._file.mode == "update": self._flush_update() def update_extend(self): """ Make sure that if the primary header needs the keyword ``EXTEND`` that it has it and it is correct. 
""" if not len(self): return if not isinstance(self[0], PrimaryHDU): # A PrimaryHDU will be automatically inserted at some point, but it # might not have been added yet return hdr = self[0].header def get_first_ext(): try: return self[1] except IndexError: return None if "EXTEND" in hdr: if not hdr["EXTEND"] and get_first_ext() is not None: hdr["EXTEND"] = True elif get_first_ext() is not None: if hdr["NAXIS"] == 0: hdr.set("EXTEND", True, after="NAXIS") else: n = hdr["NAXIS"] hdr.set("EXTEND", True, after="NAXIS" + str(n)) def writeto( self, fileobj, output_verify="exception", overwrite=False, checksum=False ): """ Write the `HDUList` to a new file. Parameters ---------- fileobj : str, file-like or `pathlib.Path` File to write to. If a file object, must be opened in a writeable mode. output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. overwrite : bool, optional If ``True``, overwrite the output file if it exists. Raises an ``OSError`` if ``False`` and the output file exists. Default is ``False``. checksum : bool When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the headers of all HDU's written to the file. """ if len(self) == 0: warnings.warn("There is nothing to write.", AstropyUserWarning) return self.verify(option=output_verify) # make sure the EXTEND keyword is there if there is extension self.update_extend() # make note of whether the input file object is already open, in which # case we should not close it after writing (that should be the job # of the caller) closed = isinstance(fileobj, str) or fileobj_closed(fileobj) mode = FILE_MODES[fileobj_mode(fileobj)] if isfile(fileobj) else "ostream" # This can accept an open file object that's open to write only, or in # append/update modes but only if the file doesn't exist. fileobj = _File(fileobj, mode=mode, overwrite=overwrite) hdulist = self.fromfile(fileobj) try: dirname = os.path.dirname(hdulist._file.name) except (AttributeError, TypeError): dirname = None try: with _free_space_check(self, dirname=dirname): for hdu in self: hdu._prewriteto(checksum=checksum) hdu._writeto(hdulist._file) hdu._postwriteto() finally: hdulist.close(output_verify=output_verify, closed=closed) def close(self, output_verify="exception", verbose=False, closed=True): """ Close the associated FITS file and memmap object, if any. Parameters ---------- output_verify : str Output verification option. Must be one of ``"fix"``, ``"silentfix"``, ``"ignore"``, ``"warn"``, or ``"exception"``. May also be any combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception" (e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info. verbose : bool When `True`, print out verbose messages. closed : bool When `True`, close the underlying file object. """ try: if ( self._file and self._file.mode in ("append", "update") and not self._file.closed ): self.flush(output_verify=output_verify, verbose=verbose) finally: if self._file and closed and hasattr(self._file, "close"): self._file.close() # Give individual HDUs an opportunity to do on-close cleanup for hdu in self: hdu._close(closed=closed) def info(self, output=None): """ Summarize the info of the HDUs in this `HDUList`. Note that this function prints its results to the console---it does not return a value. 
Parameters ---------- output : file-like or bool, optional A file-like object to write the output to. If `False`, does not output to a file and instead returns a list of tuples representing the HDU info. Writes to ``sys.stdout`` by default. """ if output is None: output = sys.stdout if self._file is None: name = "(No file associated with this HDUList)" else: name = self._file.name results = [ f"Filename: {name}", "No. Name Ver Type Cards Dimensions Format", ] format = "{:3d} {:10} {:3} {:11} {:5d} {} {} {}" default = ("", "", "", 0, (), "", "") for idx, hdu in enumerate(self): summary = hdu._summary() if len(summary) < len(default): summary += default[len(summary) :] summary = (idx,) + summary if output: results.append(format.format(*summary)) else: results.append(summary) if output: output.write("\n".join(results)) output.write("\n") output.flush() else: return results[2:] def filename(self): """ Return the file name associated with the HDUList object if one exists. Otherwise returns None. Returns ------- filename : str A string containing the file name associated with the HDUList object if an association exists. Otherwise returns None. """ if self._file is not None: if hasattr(self._file, "name"): return self._file.name return None @classmethod def _readfrom( cls, fileobj=None, data=None, mode=None, memmap=None, cache=True, lazy_load_hdus=True, ignore_missing_simple=False, *, use_fsspec=None, fsspec_kwargs=None, **kwargs, ): """ Provides the implementations from HDUList.fromfile and HDUList.fromstring, both of which wrap this method, as their implementations are largely the same. """ if fileobj is not None: if not isinstance(fileobj, _File): # instantiate a FITS file object (ffo) fileobj = _File( fileobj, mode=mode, memmap=memmap, cache=cache, use_fsspec=use_fsspec, fsspec_kwargs=fsspec_kwargs, ) # The Astropy mode is determined by the _File initializer if the # supplied mode was None mode = fileobj.mode hdulist = cls(file=fileobj) else: if mode is None: # The default mode mode = "readonly" hdulist = cls(file=data) # This method is currently only called from HDUList.fromstring and # HDUList.fromfile. If fileobj is None then this must be the # fromstring case; the data type of ``data`` will be checked in the # _BaseHDU.fromstring call. if ( not ignore_missing_simple and hdulist._file and hdulist._file.mode != "ostream" and hdulist._file.size > 0 ): pos = hdulist._file.tell() # FITS signature is supposed to be in the first 30 bytes, but to # allow reading various invalid files we will check in the first # card (80 bytes). simple = hdulist._file.read(80) match_sig = simple[:29] == FITS_SIGNATURE[:-1] and simple[29:30] in ( b"T", b"F", ) if not match_sig: # Check the SIMPLE card is there but not written correctly match_sig_relaxed = re.match(rb"SIMPLE\s*=\s*[T|F]", simple) if match_sig_relaxed: warnings.warn( "Found a SIMPLE card but its format doesn't" " respect the FITS Standard", VerifyWarning, ) else: if hdulist._file.close_on_error: hdulist._file.close() raise OSError( "No SIMPLE card found, this file does not appear to " "be a valid FITS file. 
If this is really a FITS file, " "try with ignore_missing_simple=True" ) hdulist._file.seek(pos) # Store additional keyword args that were passed to fits.open hdulist._open_kwargs = kwargs if fileobj is not None and fileobj.writeonly: # Output stream--not interested in reading/parsing # the HDUs--just writing to the output file return hdulist # Make sure at least the PRIMARY HDU can be read read_one = hdulist._read_next_hdu() # If we're trying to read only and no header units were found, # raise an exception if not read_one and mode in ("readonly", "denywrite"): # Close the file if necessary (issue #6168) if hdulist._file.close_on_error: hdulist._file.close() raise OSError("Empty or corrupt FITS file") if not lazy_load_hdus or kwargs.get("checksum") is True: # Go ahead and load all HDUs while hdulist._read_next_hdu(): pass # initialize/reset attributes to be used in "update/append" mode hdulist._resize = False hdulist._truncate = False return hdulist def _try_while_unread_hdus(self, func, *args, **kwargs): """ Attempt an operation that accesses an HDU by index/name that can fail if not all HDUs have been read yet. Keep reading HDUs until the operation succeeds or there are no more HDUs to read. """ while True: try: return func(*args, **kwargs) except Exception: if self._read_next_hdu(): continue else: raise def _read_next_hdu(self): """ Lazily load a single HDU from the fileobj or data string the `HDUList` was opened from, unless no further HDUs are found. Returns True if a new HDU was loaded, or False otherwise. """ if self._read_all: return False saved_compression_enabled = compressed.COMPRESSION_ENABLED fileobj, data, kwargs = self._file, self._data, self._open_kwargs if fileobj is not None and fileobj.closed: return False try: self._in_read_next_hdu = True if ( "disable_image_compression" in kwargs and kwargs["disable_image_compression"] ): compressed.COMPRESSION_ENABLED = False # read all HDUs try: if fileobj is not None: try: # Make sure we're back to the end of the last read # HDU if len(self) > 0: last = self[len(self) - 1] if last._data_offset is not None: offset = last._data_offset + last._data_size fileobj.seek(offset, os.SEEK_SET) hdu = _BaseHDU.readfrom(fileobj, **kwargs) except EOFError: self._read_all = True return False except OSError: # Close the file: see # https://github.com/astropy/astropy/issues/6168 # if self._file.close_on_error: self._file.close() if fileobj.writeonly: self._read_all = True return False else: raise else: if not data: self._read_all = True return False hdu = _BaseHDU.fromstring(data, **kwargs) self._data = data[hdu._data_offset + hdu._data_size :] super().append(hdu) if len(self) == 1: # Check for an extension HDU and update the EXTEND # keyword of the primary HDU accordingly self.update_extend() hdu._new = False if "checksum" in kwargs: hdu._output_checksum = kwargs["checksum"] # check in the case there is extra space after the last HDU or # corrupted HDU except (VerifyError, ValueError) as exc: warnings.warn( "Error validating header for HDU #{} (note: Astropy " "uses zero-based indexing).\n{}\n" "There may be extra bytes after the last HDU or the " "file is corrupted.".format(len(self), indent(str(exc))), VerifyWarning, ) del exc self._read_all = True return False finally: compressed.COMPRESSION_ENABLED = saved_compression_enabled self._in_read_next_hdu = False return True def _verify(self, option="warn"): errs = _ErrList([], unit="HDU") # the first (0th) element must be a primary HDU if ( len(self) > 0 and (not isinstance(self[0], PrimaryHDU)) 
and (not isinstance(self[0], _NonstandardHDU)) ): err_text = "HDUList's 0th element is not a primary HDU." fix_text = "Fixed by inserting one as 0th HDU." def fix(self=self): self.insert(0, PrimaryHDU()) err = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) errs.append(err) if len(self) > 1 and ( "EXTEND" not in self[0].header or self[0].header["EXTEND"] is not True ): err_text = ( "Primary HDU does not contain an EXTEND keyword " "equal to T even though there are extension HDUs." ) fix_text = "Fixed by inserting or updating the EXTEND keyword." def fix(header=self[0].header): naxis = header["NAXIS"] if naxis == 0: after = "NAXIS" else: after = "NAXIS" + str(naxis) header.set("EXTEND", value=True, after=after) errs.append( self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) ) # each element calls their own verify for idx, hdu in enumerate(self): if idx > 0 and (not isinstance(hdu, ExtensionHDU)): err_text = f"HDUList's element {str(idx)} is not an extension HDU." err = self.run_option(option, err_text=err_text, fixable=False) errs.append(err) else: result = hdu._verify(option) if result: errs.append(result) return errs def _flush_update(self): """Implements flushing changes to a file in update mode.""" for hdu in self: # Need to all _prewriteto() for each HDU first to determine if # resizing will be necessary hdu._prewriteto(checksum=hdu._output_checksum, inplace=True) try: self._wasresized() # if the HDUList is resized, need to write out the entire contents of # the hdulist to the file. if self._resize or self._file.compression: self._flush_resize() else: # if not resized, update in place for hdu in self: hdu._writeto(self._file, inplace=True) # reset the modification attributes after updating for hdu in self: hdu._header._modified = False finally: for hdu in self: hdu._postwriteto() def _flush_resize(self): """ Implements flushing changes in update mode when parts of one or more HDU need to be resized. """ old_name = self._file.name old_memmap = self._file.memmap name = _tmp_name(old_name) if not self._file.file_like: old_mode = os.stat(old_name).st_mode # The underlying file is an actual file object. The HDUList is # resized, so we need to write it to a tmp file, delete the # original file, and rename the tmp file to the original file. if self._file.compression == "gzip": new_file = gzip.GzipFile(name, mode="ab+") elif self._file.compression == "bzip2": if not HAS_BZ2: raise ModuleNotFoundError( "This Python installation does not provide the bz2 module." ) new_file = bz2.BZ2File(name, mode="w") else: new_file = name with self.fromfile(new_file, mode="append") as hdulist: for hdu in self: hdu._writeto(hdulist._file, inplace=True, copy=True) if sys.platform.startswith("win"): # Collect a list of open mmaps to the data; this well be # used later. See below. mmaps = [ (idx, _get_array_mmap(hdu.data), hdu.data) for idx, hdu in enumerate(self) if hdu._has_data ] hdulist._file.close() self._file.close() if sys.platform.startswith("win"): # Close all open mmaps to the data. This is only necessary on # Windows, which will not allow a file to be renamed or deleted # until all handles to that file have been closed. 
for idx, mmap, arr in mmaps: if mmap is not None: mmap.close() os.remove(self._file.name) # reopen the renamed new file with "update" mode os.rename(name, old_name) os.chmod(old_name, old_mode) if isinstance(new_file, gzip.GzipFile): old_file = gzip.GzipFile(old_name, mode="rb+") else: old_file = old_name ffo = _File(old_file, mode="update", memmap=old_memmap) self._file = ffo for hdu in self: # Need to update the _file attribute and close any open mmaps # on each HDU if hdu._has_data and _get_array_mmap(hdu.data) is not None: del hdu.data hdu._file = ffo if sys.platform.startswith("win"): # On Windows, all the original data mmaps were closed above. # However, it's possible that the user still has references to # the old data which would no longer work (possibly even cause # a segfault if they try to access it). This replaces the # buffers used by the original arrays with the buffers of mmap # arrays created from the new file. This seems to work, but # it's a flaming hack and carries no guarantees that it won't # lead to odd behavior in practice. Better to just not keep # references to data from files that had to be resized upon # flushing (on Windows--again, this is no problem on Linux). for idx, mmap, arr in mmaps: if mmap is not None: # https://github.com/numpy/numpy/issues/8628 with warnings.catch_warnings(): warnings.simplefilter("ignore", category=DeprecationWarning) arr.data = self[idx].data.data del mmaps # Just to be sure else: # The underlying file is not a file object, it is a file like # object. We can't write out to a file, we must update the file # like object in place. To do this, we write out to a temporary # file, then delete the contents in our file like object, then # write the contents of the temporary file to the now empty file # like object. self.writeto(name) hdulist = self.fromfile(name) ffo = self._file ffo.truncate(0) ffo.seek(0) for hdu in hdulist: hdu._writeto(ffo, inplace=True, copy=True) # Close the temporary file and delete it. hdulist.close() os.remove(hdulist._file.name) # reset the resize attributes after updating self._resize = False self._truncate = False for hdu in self: hdu._header._modified = False hdu._new = False hdu._file = ffo def _wasresized(self, verbose=False): """ Determine if any changes to the HDUList will require a file resize when flushing the file. Side effect of setting the objects _resize attribute. """ if not self._resize: # determine if any of the HDU is resized for hdu in self: # Header: nbytes = len(str(hdu._header)) if nbytes != (hdu._data_offset - hdu._header_offset): self._resize = True self._truncate = False if verbose: print("One or more header is resized.") break # Data: if not hdu._has_data: continue nbytes = hdu.size nbytes = nbytes + _pad_length(nbytes) if nbytes != hdu._data_size: self._resize = True self._truncate = False if verbose: print("One or more data area is resized.") break if self._truncate: try: self._file.truncate(hdu._data_offset + hdu._data_size) except OSError: self._resize = True self._truncate = False return self._resize
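# --- Hedged usage sketch (not part of the original module) -----------------
# A minimal illustration of how the HDUList machinery above is typically
# exercised through fits.open / fitsopen: lazy loading of HDUs, the context
# manager protocol (__enter__/__exit__ -> close), and flushing in "update"
# mode. The file name "example.fits" is a hypothetical placeholder.

import numpy as np

from astropy.io import fits

# Create a small FITS file so the sketch is self-contained.
fits.HDUList([fits.PrimaryHDU(np.arange(10.0))]).writeto(
    "example.fits", overwrite=True
)

# Read-only access; HDUs are loaded lazily until they are indexed or len()
# forces readall().
with fits.open("example.fits") as hdul:
    hdul.info()                      # summary written to stdout
    data = hdul[0].data.copy()       # index by number (or by EXTNAME)

# "update" mode: changes are written back by flush(), which close() calls
# automatically for append/update modes when the context manager exits.
with fits.open("example.fits", mode="update") as hdul:
    hdul[0].header["OBSERVER"] = "example"   # hypothetical keyword value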
# Licensed under a 3-clause BSD style license - see PYFITS.rst import gzip import os from astropy.io.fits.file import _File from astropy.io.fits.header import _pad_length from astropy.io.fits.util import fileobj_name from .base import BITPIX2DTYPE, _BaseHDU from .hdulist import HDUList from .image import PrimaryHDU class StreamingHDU: """ A class that provides the capability to stream data to a FITS file instead of requiring data to all be written at once. The following pseudocode illustrates its use:: header = astropy.io.fits.Header() for all the cards you need in the header: header[key] = (value, comment) shdu = astropy.io.fits.StreamingHDU('filename.fits', header) for each piece of data: shdu.write(data) shdu.close() """ def __init__(self, name, header): """ Construct a `StreamingHDU` object given a file name and a header. Parameters ---------- name : path-like or file-like The file to which the header and data will be streamed. If opened, the file object must be opened in a writeable binary mode such as 'wb' or 'ab+'. header : `Header` instance The header object associated with the data to be written to the file. Notes ----- The file will be opened and the header appended to the end of the file. If the file does not already exist, it will be created, and if the header represents a Primary header, it will be written to the beginning of the file. If the file does not exist and the provided header is not a Primary header, a default Primary HDU will be inserted at the beginning of the file and the provided header will be added as the first extension. If the file does already exist, but the provided header represents a Primary header, the header will be modified to an image extension header and appended to the end of the file. """ if isinstance(name, gzip.GzipFile): raise TypeError("StreamingHDU not supported for GzipFile objects.") self._header = header.copy() # handle a file object instead of a file name filename = fileobj_name(name) or "" filename = os.path.expanduser(filename) # Check if the file already exists. If it does not, check to see # if we were provided with a Primary Header. If not we will need # to prepend a default PrimaryHDU to the file before writing the # given header. newfile = False if filename: if not os.path.exists(filename) or os.path.getsize(filename) == 0: newfile = True elif hasattr(name, "len") and name.len == 0: newfile = True if newfile: if "SIMPLE" not in self._header: hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, "exception") else: # This will not be the first extension in the file so we # must change the Primary header provided into an image # extension header. 
if "SIMPLE" in self._header: self._header.set("XTENSION", "IMAGE", "Image extension", after="SIMPLE") del self._header["SIMPLE"] if "PCOUNT" not in self._header: dim = self._header["NAXIS"] if dim == 0: dim = "" else: dim = str(dim) self._header.set( "PCOUNT", 0, "number of parameters", after="NAXIS" + dim ) if "GCOUNT" not in self._header: self._header.set("GCOUNT", 1, "number of groups", after="PCOUNT") self._ffo = _File(name, "append") # TODO : Fix this once the HDU writing API is cleaned up tmp_hdu = _BaseHDU() # Passing self._header as an argument to _BaseHDU() will cause its # values to be modified in undesired ways...need to have a better way # of doing this tmp_hdu._header = self._header self._header_offset = tmp_hdu._writeheader(self._ffo)[0] self._data_offset = self._ffo.tell() self._size = self.size if self._size != 0: self.writecomplete = False else: self.writecomplete = True # Support the 'with' statement def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() def write(self, data): """ Write the given data to the stream. Parameters ---------- data : ndarray Data to stream to the file. Returns ------- writecomplete : int Flag that when `True` indicates that all of the required data has been written to the stream. Notes ----- Only the amount of data specified in the header provided to the class constructor may be written to the stream. If the provided data would cause the stream to overflow, an `OSError` exception is raised and the data is not written. Once sufficient data has been written to the stream to satisfy the amount specified in the header, the stream is padded to fill a complete FITS block and no more data will be accepted. An attempt to write more data after the stream has been filled will raise an `OSError` exception. If the dtype of the input data does not match what is expected by the header, a `TypeError` exception is raised. """ size = self._ffo.tell() - self._data_offset if self.writecomplete or size + data.nbytes > self._size: raise OSError( "Attempt to write more data to the stream than the header specified." ) if BITPIX2DTYPE[self._header["BITPIX"]] != data.dtype.name: raise TypeError( "Supplied data does not match the type specified in the header." ) if data.dtype.str[0] != ">": # byteswap little endian arrays before writing output = data.byteswap() else: output = data self._ffo.writearray(output) if self._ffo.tell() - self._data_offset == self._size: # the stream is full so pad the data to the next FITS block self._ffo.write(_pad_length(self._size) * "\0") self.writecomplete = True self._ffo.flush() return self.writecomplete @property def size(self): """ Return the size (in bytes) of the data portion of the HDU. """ size = 0 naxis = self._header.get("NAXIS", 0) if naxis > 0: simple = self._header.get("SIMPLE", "F") random_groups = self._header.get("GROUPS", "F") if simple == "T" and random_groups == "T": groups = 1 else: groups = 0 size = 1 for idx in range(groups, naxis): size = size * self._header["NAXIS" + str(idx + 1)] bitpix = self._header["BITPIX"] gcount = self._header.get("GCOUNT", 1) pcount = self._header.get("PCOUNT", 0) size = abs(bitpix) * gcount * (pcount + size) // 8 return size def close(self): """ Close the physical FITS file. """ self._ffo.close()
# Licensed under a 3-clause BSD style license - see PYFITS.rst import contextlib import copy import gc import pickle import re import sys import warnings import numpy as np import pytest from numpy import char as chararray try: import objgraph HAVE_OBJGRAPH = True except ImportError: HAVE_OBJGRAPH = False from astropy.io import fits from astropy.io.fits.column import NUMPY2FITS, ColumnAttribute, Delayed from astropy.io.fits.util import decode_ascii from astropy.io.fits.verify import VerifyError from astropy.table import Table from astropy.units import Unit, UnitsWarning, UnrecognizedUnit from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1 from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning from .conftest import FitsTestCase def comparefloats(a, b): """ Compare two float scalars or arrays and see if they are consistent Consistency is determined ensuring the difference is less than the expected amount. Return True if consistent, False if any differences. """ aa = a bb = b # compute expected precision if aa.dtype.name == "float32" or bb.dtype.name == "float32": precision = 0.000001 else: precision = 0.0000000000000001 precision = 0.00001 # until precision problem is fixed in astropy.io.fits diff = np.absolute(aa - bb) mask0 = aa == 0 masknz = aa != 0.0 if np.any(mask0): if diff[mask0].max() != 0.0: return False if np.any(masknz): if (diff[masknz] / np.absolute(aa[masknz])).max() > precision: return False return True def comparerecords(a, b): """ Compare two record arrays Does this field by field, using approximation testing for float columns (Complex not yet handled.) Column names not compared, but column types and sizes are. """ nfieldsa = len(a.dtype.names) nfieldsb = len(b.dtype.names) if nfieldsa != nfieldsb: print("number of fields don't match") return False for i in range(nfieldsa): fielda = a.field(i) fieldb = b.field(i) if fielda.dtype.char == "S": fielda = decode_ascii(fielda) if fieldb.dtype.char == "S": fieldb = decode_ascii(fieldb) if not isinstance(fielda, type(fieldb)) and not isinstance( fieldb, type(fielda) ): print("type(fielda): ", type(fielda), " fielda: ", fielda) print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb) print(f"field {i} type differs") return False if len(fielda) and isinstance(fielda[0], np.floating): if not comparefloats(fielda, fieldb): print("fielda: ", fielda) print("fieldb: ", fieldb) print(f"field {i} differs") return False elif isinstance(fielda, fits.column._VLF) or isinstance( fieldb, fits.column._VLF ): for row in range(len(fielda)): if np.any(fielda[row] != fieldb[row]): print(f"fielda[{row}]: {fielda[row]}") print(f"fieldb[{row}]: {fieldb[row]}") print(f"field {i} differs in row {row}") else: if np.any(fielda != fieldb): print("fielda: ", fielda) print("fieldb: ", fieldb) print(f"field {i} differs") return False return True def _assert_attr_col(new_tbhdu, tbhdu): """ Helper function to compare column attributes """ # Double check that the headers are equivalent assert tbhdu.columns.names == new_tbhdu.columns.names attrs = [ k for k, v in fits.Column.__dict__.items() if isinstance(v, ColumnAttribute) ] for name in tbhdu.columns.names: col = tbhdu.columns[name] new_col = new_tbhdu.columns[name] for attr in attrs: if getattr(col, attr) and getattr(new_col, attr): assert getattr(col, attr) == getattr(new_col, attr) class TestTableFunctions(FitsTestCase): def test_constructor_copies_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153 Ensure that a header from one HDU 
is copied when used to initialize new HDU. This is like the test of the same name in test_image, but tests this for tables as well. """ ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()]) thdr = ifd[1].header thdr["FILENAME"] = "labq01i3q_rawtag.fits" thdu = fits.BinTableHDU(header=thdr) ofd = fits.HDUList(thdu) ofd[0].header["FILENAME"] = "labq01i3q_flt.fits" # Original header should be unchanged assert thdr["FILENAME"] == "labq01i3q_rawtag.fits" def test_open(self, home_is_data): # open some existing FITS files: tt = fits.open(self.data("tb.fits")) fd = fits.open(self.data("test0.fits")) # create some local arrays a1 = chararray.array(["abc", "def", "xx"]) r1 = np.array([11.0, 12.0, 13.0], dtype=np.float32) # create a table from scratch, using a mixture of columns from existing # tables and locally created arrays: # first, create individual column definitions c1 = fits.Column(name="abc", format="3A", array=a1) c2 = fits.Column(name="def", format="E", array=r1) a3 = np.array([3, 4, 5], dtype="i2") c3 = fits.Column(name="xyz", format="I", array=a3) a4 = np.array([1, 2, 3], dtype="i2") c4 = fits.Column(name="t1", format="I", array=a4) a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype="c8") c5 = fits.Column(name="t2", format="C", array=a5) # Note that X format must be two-D array a6 = np.array([[0], [1], [0]], dtype=np.uint8) c6 = fits.Column(name="t3", format="X", array=a6) a7 = np.array([101, 102, 103], dtype="i4") c7 = fits.Column(name="t4", format="J", array=a7) a8 = np.array( [ [1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1], [0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0], [1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1], ], dtype=np.uint8, ) c8 = fits.Column(name="t5", format="11X", array=a8) # second, create a column-definitions object for all columns in a table x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8]) tbhdu = fits.BinTableHDU.from_columns(x) # another way to create a table is by using existing table's # information: x2 = fits.ColDefs(tt[1]) t2 = fits.BinTableHDU.from_columns(x2, nrows=2) ra = np.rec.array( [(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)], names="c1, c2, c3, c4", ) assert comparerecords(t2.data, ra) # the table HDU's data is a subclass of a record array, so we can # access one row like this: assert tbhdu.data[1][0] == a1[1] assert tbhdu.data[1][1] == r1[1] assert tbhdu.data[1][2] == a3[1] assert tbhdu.data[1][3] == a4[1] assert tbhdu.data[1][4] == a5[1] assert (tbhdu.data[1][5] == a6[1].view("bool")).all() assert tbhdu.data[1][6] == a7[1] assert (tbhdu.data[1][7] == a8[1]).all() # and a column like this: assert str(tbhdu.data.field("abc")) == "['abc' 'def' 'xx']" # An alternative way to create a column-definitions object is from an # existing table. 
_ = fits.ColDefs(tt[1]) # now we write out the newly created table HDU to a FITS file: fout = fits.HDUList(fits.PrimaryHDU()) fout.append(tbhdu) fout.writeto(self.temp("tableout1.fits"), overwrite=True) with fits.open(self.temp("tableout1.fits")) as f2: temp = f2[1].data.field(7) assert ( temp[0] == [ True, True, False, True, False, True, True, True, False, False, True, ] ).all() # An alternative way to create an output table FITS file: fout2 = fits.open(self.temp("tableout2.fits"), "append") fout2.append(fd[0]) fout2.append(tbhdu) fout2.close() tt.close() fd.close() def test_binary_table(self): # binary table: t = fits.open(self.data("tb.fits")) assert t[1].header["tform1"] == "1J" info = { "name": ["c1", "c2", "c3", "c4"], "format": ["1J", "3A", "1E", "1L"], "unit": ["", "", "", ""], "null": [-2147483647, "", "", ""], "bscale": ["", "", 3, ""], "bzero": ["", "", 0.4, ""], "disp": ["I11", "A3", "G15.7", "L6"], "start": ["", "", "", ""], "dim": ["", "", "", ""], "coord_inc": ["", "", "", ""], "coord_type": ["", "", "", ""], "coord_unit": ["", "", "", ""], "coord_ref_point": ["", "", "", ""], "coord_ref_value": ["", "", "", ""], "time_ref_pos": ["", "", "", ""], } assert t[1].columns.info(output=False) == info ra = np.rec.array( [(1, "abc", 3.7000002861022949, 0), (2, "xy ", 6.6999998092651367, 1)], names="c1, c2, c3, c4", ) assert comparerecords(t[1].data, ra[:2]) # Change scaled field and scale back to the original array t[1].data.field("c4")[0] = 1 t[1].data._scale_back() assert str(np.rec.recarray.field(t[1].data, "c4")) == "[84 84]" # look at data column-wise assert (t[1].data.field(0) == np.array([1, 2])).all() # When there are scaled columns, the raw data are in data._parent t.close() def test_ascii_table(self): # ASCII table a = fits.open(self.data("ascii.fits")) ra1 = np.rec.array( [ (10.123000144958496, 37), (5.1999998092651367, 23), (15.609999656677246, 17), (0.0, 0), (345.0, 345), ], names="c1, c2", ) assert comparerecords(a[1].data, ra1) # Test slicing a2 = a[1].data[2:][2:] ra2 = np.rec.array([(345.0, 345)], names="c1, c2") assert comparerecords(a2, ra2) assert (a2.field(1) == np.array([345])).all() ra3 = np.rec.array( [(10.123000144958496, 37), (15.609999656677246, 17), (345.0, 345)], names="c1, c2", ) assert comparerecords(a[1].data[::2], ra3) # Test Start Column a1 = chararray.array(["abcd", "def"]) r1 = np.array([11.0, 12.0]) c1 = fits.Column(name="abc", format="A3", start=19, array=a1) c2 = fits.Column(name="def", format="E", start=3, array=r1) c3 = fits.Column(name="t1", format="I", array=[91, 92, 93]) hdu = fits.TableHDU.from_columns([c2, c1, c3]) assert dict(hdu.data.dtype.fields) == { "abc": (np.dtype("|S3"), 18), "def": (np.dtype("|S15"), 2), "t1": (np.dtype("|S10"), 21), } hdu.writeto(self.temp("toto.fits"), overwrite=True) hdul = fits.open(self.temp("toto.fits")) assert comparerecords(hdu.data, hdul[1].data) hdul.close() # Test Scaling r1 = np.array([11.0, 12.0]) c2 = fits.Column(name="def", format="D", array=r1, bscale=2.3, bzero=0.6) hdu = fits.TableHDU.from_columns([c2]) hdu.writeto(self.temp("toto.fits"), overwrite=True) with open(self.temp("toto.fits")) as f: assert "4.95652173913043548D+00" in f.read() with fits.open(self.temp("toto.fits")) as hdul: assert comparerecords(hdu.data, hdul[1].data) # Test Integer precision according to width c1 = fits.Column(name="t2", format="I2", array=[91, 92, 93]) c2 = fits.Column(name="t4", format="I5", array=[91, 92, 93]) c3 = fits.Column(name="t8", format="I10", array=[91, 92, 93]) hdu = fits.TableHDU.from_columns([c1, c2, 
c3]) assert c1.array.dtype == np.int16 assert c2.array.dtype == np.int32 assert c3.array.dtype == np.int64 hdu.writeto(self.temp("toto.fits"), overwrite=True) with fits.open(self.temp("toto.fits")) as hdul: assert comparerecords(hdu.data, hdul[1].data) a.close() def test_endianness(self): x = np.ndarray((1,), dtype=object) channelsIn = np.array([3], dtype="uint8") x[0] = channelsIn col = fits.Column(name="Channels", format="PB()", array=x) cols = fits.ColDefs([col]) tbhdu = fits.BinTableHDU.from_columns(cols) tbhdu.name = "RFI" tbhdu.writeto(self.temp("testendian.fits"), overwrite=True) hduL = fits.open(self.temp("testendian.fits")) rfiHDU = hduL["RFI"] data = rfiHDU.data channelsOut = data.field("Channels")[0] assert (channelsIn == channelsOut).all() hduL.close() def test_column_endianness(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77 (Astropy doesn't preserve byte order of non-native order column arrays) """ a = [1.0, 2.0, 3.0, 4.0] a1 = np.array(a, dtype="<f8") a2 = np.array(a, dtype=">f8") col1 = fits.Column(name="a", format="D", array=a1) col2 = fits.Column(name="b", format="D", array=a2) cols = fits.ColDefs([col1, col2]) tbhdu = fits.BinTableHDU.from_columns(cols) assert (tbhdu.data["a"] == a1).all() assert (tbhdu.data["b"] == a2).all() # Double check that the array is converted to the correct byte-order # for FITS (big-endian). tbhdu.writeto(self.temp("testendian.fits"), overwrite=True) with fits.open(self.temp("testendian.fits")) as hdul: assert (hdul[1].data["a"] == a2).all() assert (hdul[1].data["b"] == a2).all() def test_recarray_to_bintablehdu(self): bright = np.rec.array( [ (1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib"), (3, "Rigil Kent", -0.1, "G2V"), ], formats="int16,a20,float32,a10", names="order,name,mag,Sp", ) hdu = fits.BinTableHDU(bright) assert comparerecords(hdu.data, bright) hdu.writeto(self.temp("toto.fits"), overwrite=True) hdul = fits.open(self.temp("toto.fits")) assert comparerecords(hdu.data, hdul[1].data) assert comparerecords(bright, hdul[1].data) hdul.close() def test_numpy_ndarray_to_bintablehdu(self): desc = np.dtype( { "names": ["order", "name", "mag", "Sp"], "formats": ["int", "S20", "float32", "S10"], } ) a = np.array( [ (1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib"), (3, "Rigil Kent", -0.1, "G2V"), ], dtype=desc, ) hdu = fits.BinTableHDU(a) assert comparerecords(hdu.data, a.view(fits.FITS_rec)) hdu.writeto(self.temp("toto.fits"), overwrite=True) hdul = fits.open(self.temp("toto.fits")) assert comparerecords(hdu.data, hdul[1].data) hdul.close() def test_numpy_ndarray_to_bintablehdu_with_unicode(self): desc = np.dtype( { "names": ["order", "name", "mag", "Sp"], "formats": ["int", "U20", "float32", "U10"], } ) a = np.array( [ (1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib"), (3, "Rigil Kent", -0.1, "G2V"), ], dtype=desc, ) hdu = fits.BinTableHDU(a) assert comparerecords(hdu.data, a.view(fits.FITS_rec)) hdu.writeto(self.temp("toto.fits"), overwrite=True) hdul = fits.open(self.temp("toto.fits")) assert comparerecords(hdu.data, hdul[1].data) hdul.close() def test_new_table_from_recarray(self): bright = np.rec.array( [ (1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib"), (3, "Rigil Kent", -0.1, "G2V"), ], formats="int16,a20,float64,a10", names="order,name,mag,Sp", ) hdu = fits.TableHDU.from_columns(bright, nrows=2) # Verify that all ndarray objects within the HDU reference the # same ndarray. 
assert id(hdu.data._coldefs.columns[0].array) == id( hdu.data._coldefs._arrays[0] ) assert id(hdu.data._coldefs.columns[0].array) == id( hdu.columns.columns[0].array ) assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0]) # Ensure I can change the value of one data element and it effects # all of the others. hdu.data[0][0] = 213 assert hdu.data[0][0] == 213 assert hdu.data._coldefs._arrays[0][0] == 213 assert hdu.data._coldefs.columns[0].array[0] == 213 assert hdu.columns._arrays[0][0] == 213 assert hdu.columns.columns[0].array[0] == 213 hdu.data._coldefs._arrays[0][0] = 100 assert hdu.data[0][0] == 100 assert hdu.data._coldefs._arrays[0][0] == 100 assert hdu.data._coldefs.columns[0].array[0] == 100 assert hdu.columns._arrays[0][0] == 100 assert hdu.columns.columns[0].array[0] == 100 hdu.data._coldefs.columns[0].array[0] = 500 assert hdu.data[0][0] == 500 assert hdu.data._coldefs._arrays[0][0] == 500 assert hdu.data._coldefs.columns[0].array[0] == 500 assert hdu.columns._arrays[0][0] == 500 assert hdu.columns.columns[0].array[0] == 500 hdu.columns._arrays[0][0] = 600 assert hdu.data[0][0] == 600 assert hdu.data._coldefs._arrays[0][0] == 600 assert hdu.data._coldefs.columns[0].array[0] == 600 assert hdu.columns._arrays[0][0] == 600 assert hdu.columns.columns[0].array[0] == 600 hdu.columns.columns[0].array[0] = 800 assert hdu.data[0][0] == 800 assert hdu.data._coldefs._arrays[0][0] == 800 assert hdu.data._coldefs.columns[0].array[0] == 800 assert hdu.columns._arrays[0][0] == 800 assert hdu.columns.columns[0].array[0] == 800 assert (hdu.data.field(0) == np.array([800, 2], dtype=np.int16)).all() assert hdu.data[0][1] == "Serius" assert hdu.data[1][1] == "Canopys" assert (hdu.data.field(2) == np.array([-1.45, -0.73], dtype=np.float64)).all() assert hdu.data[0][3] == "A1V" assert hdu.data[1][3] == "F0Ib" hdu.writeto(self.temp("toto.fits"), overwrite=True) with fits.open(self.temp("toto.fits")) as hdul: assert (hdul[1].data.field(0) == np.array([800, 2], dtype=np.int16)).all() assert hdul[1].data[0][1] == "Serius" assert hdul[1].data[1][1] == "Canopys" assert ( hdul[1].data.field(2) == np.array([-1.45, -0.73], dtype=np.float64) ).all() assert hdul[1].data[0][3] == "A1V" assert hdul[1].data[1][3] == "F0Ib" del hdul hdu = fits.BinTableHDU.from_columns(bright, nrows=2) tmp = np.rec.array( [(1, "Serius", -1.45, "A1V"), (2, "Canopys", -0.73, "F0Ib")], formats="int16,a20,float64,a10", names="order,name,mag,Sp", ) assert comparerecords(hdu.data, tmp) hdu.writeto(self.temp("toto.fits"), overwrite=True) with fits.open(self.temp("toto.fits")) as hdul: assert comparerecords(hdu.data, hdul[1].data) def test_new_fitsrec(self): """ Tests creating a new FITS_rec object from a multi-field ndarray. """ with fits.open(self.data("tb.fits")) as h: data = h[1].data new_data = np.array([(3, "qwe", 4.5, False)], dtype=data.dtype) appended = np.append(data, new_data).view(fits.FITS_rec) assert repr(appended).startswith("FITS_rec(") # This test used to check the entire string representation of FITS_rec, # but that has problems between different numpy versions. 
Instead just # check that the FITS_rec was created, and we'll let subsequent tests # worry about checking values and such def test_appending_a_column(self): counts = np.array([312, 334, 308, 317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp("table1.fits")) counts = np.array([412, 434, 408, 417]) names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp("table2.fits")) # Append the rows of table 2 after the rows of table 1 # The column definitions are assumed to be the same # Open the two files we want to append t1 = fits.open(self.temp("table1.fits")) t2 = fits.open(self.temp("table2.fits")) # Get the number of rows in the table from the first file nrows1 = t1[1].data.shape[0] # Get the total number of rows in the resulting appended table nrows = t1[1].data.shape[0] + t2[1].data.shape[0] assert t1[1].columns._arrays[1] is t1[1].columns.columns[1].array # Create a new table that consists of the data from the first table # but has enough space in the ndarray to hold the data from both tables hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows) # For each column in the tables append the data from table 2 after the # data from table 1. 
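        # (from_columns(t1[1].columns, nrows=nrows) should already have
        # allocated a FITS_rec large enough for all of the rows, pre-filled
        # with the rows of table 1, so only the trailing [nrows1:] slice of
        # each field needs to be copied from table 2 here.)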
for i in range(len(t1[1].columns)): hdu.data.field(i)[nrows1:] = t2[1].data.field(i) hdu.writeto(self.temp("newtable.fits")) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""), (1, "", 1, "BinTableHDU", 19, "8R x 5C", "[10A, J, 10A, 5E, L]", ""), ] assert fits.info(self.temp("newtable.fits"), output=False) == info z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32) array = np.rec.array( [ ("NGC1", 312, "", z, True), ("NGC2", 334, "", z, False), ("NGC3", 308, "", z, True), ("NCG4", 317, "", z, True), ("NGC5", 412, "", z, False), ("NGC6", 434, "", z, True), ("NGC7", 408, "", z, False), ("NCG8", 417, "", z, False), ], formats="a10,u4,a10,5f4,l", ) assert comparerecords(hdu.data, array) # Verify that all of the references to the data point to the same # numarray hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 # Same verification from the file hdul = fits.open(self.temp("newtable.fits")) hdu = hdul[1] hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 t1.close() t2.close() hdul.close() def test_adding_a_column(self): # Tests adding a column to a table. 
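        # Adding a Column to a ColDefs with "+" is expected to produce a new,
        # extended ColDefs rather than modifying the original definition
        # (contrast with the in-place add_col test that follows).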
        counts = np.array([312, 334, 308, 317])
        names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
        c1 = fits.Column(name="target", format="10A", array=names)
        c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
        c3 = fits.Column(name="notes", format="A10")
        c4 = fits.Column(name="spectrum", format="5E")
        c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
        coldefs = fits.ColDefs([c1, c2, c3, c4])
        tbhdu = fits.BinTableHDU.from_columns(coldefs)
        assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]

        coldefs1 = coldefs + c5
        tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
        assert tbhdu1.columns.names == ["target", "counts", "notes", "spectrum", "flag"]

        z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
        array = np.rec.array(
            [
                ("NGC1", 312, "", z, True),
                ("NGC2", 334, "", z, False),
                ("NGC3", 308, "", z, True),
                ("NCG4", 317, "", z, True),
            ],
            formats="a10,u4,a10,5f4,l",
        )
        assert comparerecords(tbhdu1.data, array)

    def test_adding_a_column_inplace(self):
        # Tests adding a column to a table in place.
        counts = np.array([312, 334, 308, 317])
        names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"])
        c1 = fits.Column(name="target", format="10A", array=names)
        c2 = fits.Column(name="counts", format="J", unit="DN", array=counts)
        c3 = fits.Column(name="notes", format="A10")
        c4 = fits.Column(name="spectrum", format="5E")
        c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1])
        coldefs = fits.ColDefs([c1, c2, c3, c4])
        tbhdu = fits.BinTableHDU.from_columns(coldefs)
        assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"]

        tbhdu.columns.add_col(c5)
        assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"]

        z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)
        array = np.rec.array(
            [
                ("NGC1", 312, "", z, True),
                ("NGC2", 334, "", z, False),
                ("NGC3", 308, "", z, True),
                ("NCG4", 317, "", z, True),
            ],
            formats="a10,u4,a10,5f4,l",
        )
        assert comparerecords(tbhdu.data, array)

    def test_adding_a_column_to_file(self):
        hdul = fits.open(self.data("table.fits"))
        tbhdu = hdul[1]
        col = fits.Column(name="a", array=np.array([1, 2]), format="K")
        tbhdu.columns.add_col(col)
        assert tbhdu.columns.names == ["target", "V_mag", "a"]
        array = np.rec.array(
            [("NGC1001", 11.1, 1), ("NGC1002", 12.3, 2), ("NGC1003", 15.2, 0)],
            formats="a20,f4,i8",
        )
        assert comparerecords(tbhdu.data, array)
        hdul.close()

    def test_removing_a_column_inplace(self):
        # Tests removing a column from a table in place.
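        # ColDefs.del_col is expected to drop the named column in place, so
        # both the column names and the records built from the ColDefs below
        # should shrink accordingly.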
counts = np.array([312, 334, 308, 317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum", "flag"] tbhdu.columns.del_col("flag") assert tbhdu.columns.names == ["target", "counts", "notes", "spectrum"] z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32) array = np.rec.array( [ ("NGC1", 312, "", z), ("NGC2", 334, "", z), ("NGC3", 308, "", z), ("NCG4", 317, "", z), ], formats="a10,u4,a10,5f4", ) assert comparerecords(tbhdu.data, array) tbhdu.columns.del_col("counts") tbhdu.columns.del_col("notes") assert tbhdu.columns.names == ["target", "spectrum"] array = np.rec.array( [("NGC1", z), ("NGC2", z), ("NGC3", z), ("NCG4", z)], formats="a10,5f4" ) assert comparerecords(tbhdu.data, array) def test_removing_a_column_from_file(self): hdul = fits.open(self.data("table.fits")) tbhdu = hdul[1] tbhdu.columns.del_col("V_mag") assert tbhdu.columns.names == ["target"] array = np.rec.array([("NGC1001",), ("NGC1002",), ("NGC1003",)], formats="a20") assert comparerecords(tbhdu.data, array) hdul.close() def test_merge_tables(self): counts = np.array([312, 334, 308, 317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp("table1.fits")) counts = np.array([412, 434, 408, 417]) names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"]) c1 = fits.Column(name="target1", format="10A", array=names) c2 = fits.Column(name="counts1", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes1", format="A10") c4 = fits.Column(name="spectrum1", format="5E") c5 = fits.Column(name="flag1", format="L", array=[0, 1, 0, 0]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp("table2.fits")) # Merge the columns of table 2 after the columns of table 1 # The column names are assumed to be different # Open the two files we want to append t1 = fits.open(self.temp("table1.fits")) t2 = fits.open(self.temp("table2.fits")) hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns) z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32) array = np.rec.array( [ ("NGC1", 312, "", z, True, "NGC5", 412, "", z, False), ("NGC2", 334, "", z, False, "NGC6", 434, "", z, True), ("NGC3", 308, "", z, True, "NGC7", 408, "", z, False), ("NCG4", 317, "", z, True, "NCG8", 417, "", z, False), ], formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l", ) assert comparerecords(hdu.data, array) hdu.writeto(self.temp("newtable.fits")) # Verify that all of the references to the data point to the same # numarray hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 
hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 info = [ (0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""), ( 1, "", 1, "BinTableHDU", 30, "4R x 10C", "[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]", "", ), ] assert fits.info(self.temp("newtable.fits"), output=False) == info hdul = fits.open(self.temp("newtable.fits")) hdu = hdul[1] assert hdu.columns.names == [ "target", "counts", "notes", "spectrum", "flag", "target1", "counts1", "notes1", "spectrum1", "flag1", ] z = np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32) array = np.rec.array( [ ("NGC1", 312, "", z, True, "NGC5", 412, "", z, False), ("NGC2", 334, "", z, False, "NGC6", 434, "", z, True), ("NGC3", 308, "", z, True, "NGC7", 408, "", z, False), ("NCG4", 317, "", z, True, "NCG8", 417, "", z, False), ], formats="a10,u4,a10,5f4,l,a10,u4,a10,5f4,l", ) assert comparerecords(hdu.data, array) # Same verification from the file hdu.data[0][1] = 300 assert hdu.data._coldefs._arrays[1][0] == 300 assert hdu.data._coldefs.columns[1].array[0] == 300 assert hdu.columns._arrays[1][0] == 300 assert hdu.columns.columns[1].array[0] == 300 assert hdu.data[0][1] == 300 hdu.data._coldefs._arrays[1][0] = 200 assert hdu.data._coldefs._arrays[1][0] == 200 assert hdu.data._coldefs.columns[1].array[0] == 200 assert hdu.columns._arrays[1][0] == 200 assert hdu.columns.columns[1].array[0] == 200 assert hdu.data[0][1] == 200 hdu.data._coldefs.columns[1].array[0] = 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert hdu.data[0][1] == 100 hdu.columns._arrays[1][0] = 90 assert hdu.data._coldefs._arrays[1][0] == 90 assert hdu.data._coldefs.columns[1].array[0] == 90 assert hdu.columns._arrays[1][0] == 90 assert hdu.columns.columns[1].array[0] == 90 assert hdu.data[0][1] == 90 hdu.columns.columns[1].array[0] = 80 assert hdu.data._coldefs._arrays[1][0] == 80 assert hdu.data._coldefs.columns[1].array[0] == 80 assert hdu.columns._arrays[1][0] == 80 assert hdu.columns.columns[1].array[0] == 80 assert hdu.data[0][1] == 80 t1.close() t2.close() hdul.close() def test_modify_column_attributes(self): """Regression test for https://github.com/astropy/astropy/issues/996 This just tests one particular use case, but it should apply pretty well to other similar cases. 
""" NULLS = {"a": 2, "b": "b", "c": 2.3} data = np.array( list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])), dtype=[("a", int), ("b", "S1"), ("c", float)], ) b = fits.BinTableHDU(data=data) for col in b.columns: col.null = NULLS[col.name] b.writeto(self.temp("test.fits"), overwrite=True) with fits.open(self.temp("test.fits")) as hdul: header = hdul[1].header assert header["TNULL1"] == 2 assert header["TNULL2"] == "b" assert header["TNULL3"] == 2.3 def test_multidimension_table_from_numpy_rec_columns(self): """Regression test for https://github.com/astropy/astropy/issues/5280 and https://github.com/astropy/astropy/issues/5287 multidimentional tables can now be written with the correct TDIM. Author: Stephen Bailey. """ dtype = [ ("x", (str, 5)), # 1D column of 5-character strings ("y", (str, 3), (4,)), # 2D column; each row is four 3-char strings ] data = np.zeros(2, dtype=dtype) data["x"] = ["abcde", "xyz"] data["y"][0] = ["A", "BC", "DEF", "123"] data["y"][1] = ["X", "YZ", "PQR", "999"] table = Table(data) # Test convenience functions io.fits.writeto / getdata fits.writeto(self.temp("test.fits"), data) dx = fits.getdata(self.temp("test.fits")) assert data["x"].dtype == dx["x"].dtype assert data["y"].dtype == dx["y"].dtype assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}" assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}" # Test fits.BinTableHDU(data) and avoid convenience functions hdu0 = fits.PrimaryHDU() hdu1 = fits.BinTableHDU(data) hx = fits.HDUList([hdu0, hdu1]) hx.writeto(self.temp("test2.fits")) fx = fits.open(self.temp("test2.fits")) dx = fx[1].data fx.close() assert data["x"].dtype == dx["x"].dtype assert data["y"].dtype == dx["y"].dtype assert np.all(data["x"] == dx["x"]), f"x: {data['x']} != {dx['x']}" assert np.all(data["y"] == dx["y"]), f"y: {data['y']} != {dx['y']}" # Test Table write and read table.write(self.temp("test3.fits")) tx = Table.read(self.temp("test3.fits"), character_as_bytes=False) assert table["x"].dtype == tx["x"].dtype assert table["y"].dtype == tx["y"].dtype assert np.all(table["x"] == tx["x"]), f"x: {table['x']} != {tx['x']}" assert np.all(table["y"] == tx["y"]), f"y: {table['y']} != {tx['y']}" def test_mask_array(self): t = fits.open(self.data("table.fits")) tbdata = t[1].data mask = tbdata.field("V_mag") > 12 newtbdata = tbdata[mask] hdu = fits.BinTableHDU(newtbdata) hdu.writeto(self.temp("newtable.fits")) hdul = fits.open(self.temp("newtable.fits")) # match to a regex rather than a specific string. 
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]" assert re.match(expect, str(hdu.data)) assert re.match(expect, str(hdul[1].data)) t.close() hdul.close() def test_slice_a_row(self): counts = np.array([312, 334, 308, 317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp("table1.fits")) t1 = fits.open(self.temp("table1.fits")) row = t1[1].data[2] assert row["counts"] == 308 a, b, c = row[1:4] assert a == counts[2] assert b == "" assert (c == np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32)).all() row["counts"] = 310 assert row["counts"] == 310 row[1] = 315 assert row["counts"] == 315 assert row[1:4]["counts"] == 315 pytest.raises(KeyError, lambda r: r[1:4]["flag"], row) row[1:4]["counts"] = 300 assert row[1:4]["counts"] == 300 assert row["counts"] == 300 row[1:4][0] = 400 assert row[1:4]["counts"] == 400 row[1:4]["counts"] = 300 assert row[1:4]["counts"] == 300 # Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59 row[1:4][::-1][-1] = 500 assert row[1:4]["counts"] == 500 row[1:4:2][0] = 300 assert row[1:4]["counts"] == 300 pytest.raises(KeyError, lambda r: r[1:4]["flag"], row) assert row[1:4].field(0) == 300 assert row[1:4].field("counts") == 300 pytest.raises(KeyError, row[1:4].field, "flag") row[1:4].setfield("counts", 500) assert row[1:4].field(0) == 500 pytest.raises(KeyError, row[1:4].setfield, "flag", False) assert t1[1].data._coldefs._arrays[1][2] == 500 assert t1[1].data._coldefs.columns[1].array[2] == 500 assert t1[1].columns._arrays[1][2] == 500 assert t1[1].columns.columns[1].array[2] == 500 assert t1[1].data[2][1] == 500 t1.close() def test_fits_record_len(self): counts = np.array([312, 334, 308, 317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.writeto(self.temp("table1.fits")) t1 = fits.open(self.temp("table1.fits")) assert len(t1[1].data[0]) == 5 assert len(t1[1].data[0][0:4]) == 4 assert len(t1[1].data[0][0:5]) == 5 assert len(t1[1].data[0][0:6]) == 5 assert len(t1[1].data[0][0:7]) == 5 assert len(t1[1].data[0][1:4]) == 3 assert len(t1[1].data[0][1:5]) == 4 assert len(t1[1].data[0][1:6]) == 4 assert len(t1[1].data[0][1:7]) == 4 t1.close() def test_add_data_by_rows(self): counts = np.array([312, 334, 308, 317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) c1 = fits.Column(name="target", format="10A") c2 = fits.Column(name="counts", format="J", unit="DN") c3 = fits.Column(name="notes", 
format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L") coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5) # Test assigning data to a tables row using a FITS_record tbhdu.data[0] = tbhdu1.data[0] tbhdu.data[4] = tbhdu1.data[3] # Test assigning data to a tables row using a tuple tbhdu.data[2] = ( "NGC1", 312, "A Note", np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32), True, ) # Test assigning data to a tables row using a list tbhdu.data[3] = [ "JIM1", "33", "A Note", np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32), True, ] # Verify that all ndarray objects within the HDU reference the # same ndarray. assert id(tbhdu.data._coldefs.columns[0].array) == id( tbhdu.data._coldefs._arrays[0] ) assert id(tbhdu.data._coldefs.columns[0].array) == id( tbhdu.columns.columns[0].array ) assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0]) assert tbhdu.data[0][1] == 312 assert tbhdu.data._coldefs._arrays[1][0] == 312 assert tbhdu.data._coldefs.columns[1].array[0] == 312 assert tbhdu.columns._arrays[1][0] == 312 assert tbhdu.columns.columns[1].array[0] == 312 assert tbhdu.columns.columns[0].array[0] == "NGC1" assert tbhdu.columns.columns[2].array[0] == "" assert ( tbhdu.columns.columns[3].array[0] == np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32) ).all() assert ( isinstance( v := (tbhdu.columns.columns[4].array[0] == np.True_), (bool, np.bool_) ) and v ) assert tbhdu.data[3][1] == 33 assert tbhdu.data._coldefs._arrays[1][3] == 33 assert tbhdu.data._coldefs.columns[1].array[3] == 33 assert tbhdu.columns._arrays[1][3] == 33 assert tbhdu.columns.columns[1].array[3] == 33 assert tbhdu.columns.columns[0].array[3] == "JIM1" assert tbhdu.columns.columns[2].array[3] == "A Note" assert ( tbhdu.columns.columns[3].array[3] == np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32) ).all() assert ( isinstance( v := (tbhdu.columns.columns[4].array[3] == np.True_), (bool, np.bool_) ) and v ) def test_assign_multiple_rows_to_table(self): counts = np.array([312, 334, 308, 317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) counts = np.array([112, 134, 108, 117]) names = np.array(["NGC5", "NGC6", "NGC7", "NCG8"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[0, 1, 0, 0]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.data[0][3] = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32) tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9) # Assign the 4 rows from the second table to rows 5 thru 8 of the # new table. Note that the last row of the new table will still be # initialized to the default values. tbhdu2.data[4:] = tbhdu.data # Verify that all ndarray objects within the HDU reference the # same ndarray. 
assert id(tbhdu2.data._coldefs.columns[0].array) == id( tbhdu2.data._coldefs._arrays[0] ) assert id(tbhdu2.data._coldefs.columns[0].array) == id( tbhdu2.columns.columns[0].array ) assert id(tbhdu2.data._coldefs.columns[0].array) == id( tbhdu2.columns._arrays[0] ) assert tbhdu2.data[0][1] == 312 assert tbhdu2.data._coldefs._arrays[1][0] == 312 assert tbhdu2.data._coldefs.columns[1].array[0] == 312 assert tbhdu2.columns._arrays[1][0] == 312 assert tbhdu2.columns.columns[1].array[0] == 312 assert tbhdu2.columns.columns[0].array[0] == "NGC1" assert tbhdu2.columns.columns[2].array[0] == "" assert ( tbhdu2.columns.columns[3].array[0] == np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32) ).all() assert ( isinstance( v := (tbhdu2.columns.columns[4].array[0] == np.True_), (bool, np.bool_) ) and v ) assert tbhdu2.data[4][1] == 112 assert tbhdu2.data._coldefs._arrays[1][4] == 112 assert tbhdu2.data._coldefs.columns[1].array[4] == 112 assert tbhdu2.columns._arrays[1][4] == 112 assert tbhdu2.columns.columns[1].array[4] == 112 assert tbhdu2.columns.columns[0].array[4] == "NGC5" assert tbhdu2.columns.columns[2].array[4] == "" assert ( tbhdu2.columns.columns[3].array[4] == np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32) ).all() assert ( isinstance( v := (tbhdu2.columns.columns[4].array[4] == np.False_), (bool, np.bool_) ) and v ) assert tbhdu2.columns.columns[1].array[8] == 0 assert tbhdu2.columns.columns[0].array[8] == "" assert tbhdu2.columns.columns[2].array[8] == "" assert ( tbhdu2.columns.columns[3].array[8] == np.array([0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float32) ).all() assert ( isinstance( v := (tbhdu2.columns.columns[4].array[8] == np.False_), (bool, np.bool_) ) and v ) def test_verify_data_references(self): counts = np.array([312, 334, 308, 317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) # Verify that original ColDefs object has independent Column # objects. assert id(coldefs.columns[0]) != id(c1) # Verify that original ColDefs object has independent ndarray # objects. assert id(coldefs.columns[0].array) != id(names) # Verify that original ColDefs object references the same data # object as the original Column object. assert id(coldefs.columns[0].array) == id(c1.array) assert id(coldefs.columns[0].array) == id(coldefs._arrays[0]) # Verify new HDU has an independent ColDefs object. assert id(coldefs) != id(tbhdu.columns) # Verify new HDU has independent Column objects. assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0]) # Verify new HDU has independent ndarray objects. assert id(coldefs.columns[0].array) != id(tbhdu.columns.columns[0].array) # Verify that both ColDefs objects in the HDU reference the same # Coldefs object. assert id(tbhdu.columns) == id(tbhdu.data._coldefs) # Verify that all ndarray objects within the HDU reference the # same ndarray. 
assert id(tbhdu.data._coldefs.columns[0].array) == id( tbhdu.data._coldefs._arrays[0] ) assert id(tbhdu.data._coldefs.columns[0].array) == id( tbhdu.columns.columns[0].array ) assert id(tbhdu.data._coldefs.columns[0].array) == id(tbhdu.columns._arrays[0]) tbhdu.writeto(self.temp("table1.fits")) t1 = fits.open(self.temp("table1.fits")) t1[1].data[0][1] = 213 assert t1[1].data[0][1] == 213 assert t1[1].data._coldefs._arrays[1][0] == 213 assert t1[1].data._coldefs.columns[1].array[0] == 213 assert t1[1].columns._arrays[1][0] == 213 assert t1[1].columns.columns[1].array[0] == 213 t1[1].data._coldefs._arrays[1][0] = 100 assert t1[1].data[0][1] == 100 assert t1[1].data._coldefs._arrays[1][0] == 100 assert t1[1].data._coldefs.columns[1].array[0] == 100 assert t1[1].columns._arrays[1][0] == 100 assert t1[1].columns.columns[1].array[0] == 100 t1[1].data._coldefs.columns[1].array[0] = 500 assert t1[1].data[0][1] == 500 assert t1[1].data._coldefs._arrays[1][0] == 500 assert t1[1].data._coldefs.columns[1].array[0] == 500 assert t1[1].columns._arrays[1][0] == 500 assert t1[1].columns.columns[1].array[0] == 500 t1[1].columns._arrays[1][0] = 600 assert t1[1].data[0][1] == 600 assert t1[1].data._coldefs._arrays[1][0] == 600 assert t1[1].data._coldefs.columns[1].array[0] == 600 assert t1[1].columns._arrays[1][0] == 600 assert t1[1].columns.columns[1].array[0] == 600 t1[1].columns.columns[1].array[0] = 800 assert t1[1].data[0][1] == 800 assert t1[1].data._coldefs._arrays[1][0] == 800 assert t1[1].data._coldefs.columns[1].array[0] == 800 assert t1[1].columns._arrays[1][0] == 800 assert t1[1].columns.columns[1].array[0] == 800 t1.close() def test_new_table_with_ndarray(self): counts = np.array([312, 334, 308, 317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray)) # Verify that all ndarray objects within the HDU reference the # same ndarray. assert id(tbhdu1.data._coldefs.columns[0].array) == id( tbhdu1.data._coldefs._arrays[0] ) assert id(tbhdu1.data._coldefs.columns[0].array) == id( tbhdu1.columns.columns[0].array ) assert id(tbhdu1.data._coldefs.columns[0].array) == id( tbhdu1.columns._arrays[0] ) # Ensure I can change the value of one data element and it effects # all of the others. 
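        # (The same sequence of in-place edits is repeated further below on an
        # HDU read back from disk, to check that the shared-reference
        # behaviour also holds after a write/read round trip.)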
tbhdu1.data[0][1] = 213 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 tbhdu1.data._coldefs._arrays[1][0] = 100 assert tbhdu1.data[0][1] == 100 assert tbhdu1.data._coldefs._arrays[1][0] == 100 assert tbhdu1.data._coldefs.columns[1].array[0] == 100 assert tbhdu1.columns._arrays[1][0] == 100 assert tbhdu1.columns.columns[1].array[0] == 100 tbhdu1.data._coldefs.columns[1].array[0] = 500 assert tbhdu1.data[0][1] == 500 assert tbhdu1.data._coldefs._arrays[1][0] == 500 assert tbhdu1.data._coldefs.columns[1].array[0] == 500 assert tbhdu1.columns._arrays[1][0] == 500 assert tbhdu1.columns.columns[1].array[0] == 500 tbhdu1.columns._arrays[1][0] = 600 assert tbhdu1.data[0][1] == 600 assert tbhdu1.data._coldefs._arrays[1][0] == 600 assert tbhdu1.data._coldefs.columns[1].array[0] == 600 assert tbhdu1.columns._arrays[1][0] == 600 assert tbhdu1.columns.columns[1].array[0] == 600 tbhdu1.columns.columns[1].array[0] = 800 assert tbhdu1.data[0][1] == 800 assert tbhdu1.data._coldefs._arrays[1][0] == 800 assert tbhdu1.data._coldefs.columns[1].array[0] == 800 assert tbhdu1.columns._arrays[1][0] == 800 assert tbhdu1.columns.columns[1].array[0] == 800 tbhdu1.writeto(self.temp("table1.fits")) t1 = fits.open(self.temp("table1.fits")) t1[1].data[0][1] = 213 assert t1[1].data[0][1] == 213 assert t1[1].data._coldefs._arrays[1][0] == 213 assert t1[1].data._coldefs.columns[1].array[0] == 213 assert t1[1].columns._arrays[1][0] == 213 assert t1[1].columns.columns[1].array[0] == 213 t1[1].data._coldefs._arrays[1][0] = 100 assert t1[1].data[0][1] == 100 assert t1[1].data._coldefs._arrays[1][0] == 100 assert t1[1].data._coldefs.columns[1].array[0] == 100 assert t1[1].columns._arrays[1][0] == 100 assert t1[1].columns.columns[1].array[0] == 100 t1[1].data._coldefs.columns[1].array[0] = 500 assert t1[1].data[0][1] == 500 assert t1[1].data._coldefs._arrays[1][0] == 500 assert t1[1].data._coldefs.columns[1].array[0] == 500 assert t1[1].columns._arrays[1][0] == 500 assert t1[1].columns.columns[1].array[0] == 500 t1[1].columns._arrays[1][0] = 600 assert t1[1].data[0][1] == 600 assert t1[1].data._coldefs._arrays[1][0] == 600 assert t1[1].data._coldefs.columns[1].array[0] == 600 assert t1[1].columns._arrays[1][0] == 600 assert t1[1].columns.columns[1].array[0] == 600 t1[1].columns.columns[1].array[0] = 800 assert t1[1].data[0][1] == 800 assert t1[1].data._coldefs._arrays[1][0] == 800 assert t1[1].data._coldefs.columns[1].array[0] == 800 assert t1[1].columns._arrays[1][0] == 800 assert t1[1].columns.columns[1].array[0] == 800 t1.close() def test_new_table_with_fits_rec(self): counts = np.array([312, 334, 308, 317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu = fits.BinTableHDU.from_columns(coldefs) tbhdu.data[0][1] = 213 assert tbhdu.data[0][1] == 213 assert tbhdu.data._coldefs._arrays[1][0] == 213 assert tbhdu.data._coldefs.columns[1].array[0] == 213 assert tbhdu.columns._arrays[1][0] == 213 assert tbhdu.columns.columns[1].array[0] == 213 tbhdu.data._coldefs._arrays[1][0] = 100 assert tbhdu.data[0][1] == 100 assert 
tbhdu.data._coldefs._arrays[1][0] == 100 assert tbhdu.data._coldefs.columns[1].array[0] == 100 assert tbhdu.columns._arrays[1][0] == 100 assert tbhdu.columns.columns[1].array[0] == 100 tbhdu.data._coldefs.columns[1].array[0] = 500 assert tbhdu.data[0][1] == 500 assert tbhdu.data._coldefs._arrays[1][0] == 500 assert tbhdu.data._coldefs.columns[1].array[0] == 500 assert tbhdu.columns._arrays[1][0] == 500 assert tbhdu.columns.columns[1].array[0] == 500 tbhdu.columns._arrays[1][0] = 600 assert tbhdu.data[0][1] == 600 assert tbhdu.data._coldefs._arrays[1][0] == 600 assert tbhdu.data._coldefs.columns[1].array[0] == 600 assert tbhdu.columns._arrays[1][0] == 600 assert tbhdu.columns.columns[1].array[0] == 600 tbhdu.columns.columns[1].array[0] = 800 assert tbhdu.data[0][1] == 800 assert tbhdu.data._coldefs._arrays[1][0] == 800 assert tbhdu.data._coldefs.columns[1].array[0] == 800 assert tbhdu.columns._arrays[1][0] == 800 assert tbhdu.columns.columns[1].array[0] == 800 tbhdu.columns.columns[1].array[0] = 312 tbhdu.writeto(self.temp("table1.fits")) t1 = fits.open(self.temp("table1.fits")) t1[1].data[0][1] = 1 fr = t1[1].data assert t1[1].data[0][1] == 1 assert t1[1].data._coldefs._arrays[1][0] == 1 assert t1[1].data._coldefs.columns[1].array[0] == 1 assert t1[1].columns._arrays[1][0] == 1 assert t1[1].columns.columns[1].array[0] == 1 assert fr[0][1] == 1 assert fr._coldefs._arrays[1][0] == 1 assert fr._coldefs.columns[1].array[0] == 1 fr._coldefs.columns[1].array[0] = 312 tbhdu1 = fits.BinTableHDU.from_columns(fr) i = 0 for row in tbhdu1.data: for j in range(len(row)): if isinstance(row[j], np.ndarray): assert (row[j] == tbhdu.data[i][j]).all() else: assert row[j] == tbhdu.data[i][j] i = i + 1 tbhdu1.data[0][1] = 213 assert t1[1].data[0][1] == 312 assert t1[1].data._coldefs._arrays[1][0] == 312 assert t1[1].data._coldefs.columns[1].array[0] == 312 assert t1[1].columns._arrays[1][0] == 312 assert t1[1].columns.columns[1].array[0] == 312 assert fr[0][1] == 312 assert fr._coldefs._arrays[1][0] == 312 assert fr._coldefs.columns[1].array[0] == 312 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 t1[1].data[0][1] = 10 assert t1[1].data[0][1] == 10 assert t1[1].data._coldefs._arrays[1][0] == 10 assert t1[1].data._coldefs.columns[1].array[0] == 10 assert t1[1].columns._arrays[1][0] == 10 assert t1[1].columns.columns[1].array[0] == 10 assert fr[0][1] == 10 assert fr._coldefs._arrays[1][0] == 10 assert fr._coldefs.columns[1].array[0] == 10 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 tbhdu1.data._coldefs._arrays[1][0] = 666 assert t1[1].data[0][1] == 10 assert t1[1].data._coldefs._arrays[1][0] == 10 assert t1[1].data._coldefs.columns[1].array[0] == 10 assert t1[1].columns._arrays[1][0] == 10 assert t1[1].columns.columns[1].array[0] == 10 assert fr[0][1] == 10 assert fr._coldefs._arrays[1][0] == 10 assert fr._coldefs.columns[1].array[0] == 10 assert tbhdu1.data[0][1] == 666 assert tbhdu1.data._coldefs._arrays[1][0] == 666 assert tbhdu1.data._coldefs.columns[1].array[0] == 666 assert tbhdu1.columns._arrays[1][0] == 666 assert tbhdu1.columns.columns[1].array[0] == 666 t1.close() def test_bin_table_hdu_constructor(self): counts = np.array([312, 334, 308, 
317]) names = np.array(["NGC1", "NGC2", "NGC3", "NCG4"]) c1 = fits.Column(name="target", format="10A", array=names) c2 = fits.Column(name="counts", format="J", unit="DN", array=counts) c3 = fits.Column(name="notes", format="A10") c4 = fits.Column(name="spectrum", format="5E") c5 = fits.Column(name="flag", format="L", array=[1, 0, 1, 1]) coldefs = fits.ColDefs([c1, c2, c3, c4, c5]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) hdu = fits.BinTableHDU(tbhdu1.data) # Verify that all ndarray objects within the HDU reference the # same ndarray. assert id(hdu.data._coldefs.columns[0].array) == id( hdu.data._coldefs._arrays[0] ) assert id(hdu.data._coldefs.columns[0].array) == id( hdu.columns.columns[0].array ) assert id(hdu.data._coldefs.columns[0].array) == id(hdu.columns._arrays[0]) # Verify that the references in the original HDU are the same as the # references in the new HDU. assert id(tbhdu1.data._coldefs.columns[0].array) == id( hdu.data._coldefs._arrays[0] ) # Verify that a change in the new HDU is reflected in both the new # and original HDU. hdu.data[0][1] = 213 assert hdu.data[0][1] == 213 assert hdu.data._coldefs._arrays[1][0] == 213 assert hdu.data._coldefs.columns[1].array[0] == 213 assert hdu.columns._arrays[1][0] == 213 assert hdu.columns.columns[1].array[0] == 213 assert tbhdu1.data[0][1] == 213 assert tbhdu1.data._coldefs._arrays[1][0] == 213 assert tbhdu1.data._coldefs.columns[1].array[0] == 213 assert tbhdu1.columns._arrays[1][0] == 213 assert tbhdu1.columns.columns[1].array[0] == 213 hdu.data._coldefs._arrays[1][0] = 100 assert hdu.data[0][1] == 100 assert hdu.data._coldefs._arrays[1][0] == 100 assert hdu.data._coldefs.columns[1].array[0] == 100 assert hdu.columns._arrays[1][0] == 100 assert hdu.columns.columns[1].array[0] == 100 assert tbhdu1.data[0][1] == 100 assert tbhdu1.data._coldefs._arrays[1][0] == 100 assert tbhdu1.data._coldefs.columns[1].array[0] == 100 assert tbhdu1.columns._arrays[1][0] == 100 assert tbhdu1.columns.columns[1].array[0] == 100 hdu.data._coldefs.columns[1].array[0] = 500 assert hdu.data[0][1] == 500 assert hdu.data._coldefs._arrays[1][0] == 500 assert hdu.data._coldefs.columns[1].array[0] == 500 assert hdu.columns._arrays[1][0] == 500 assert hdu.columns.columns[1].array[0] == 500 assert tbhdu1.data[0][1] == 500 assert tbhdu1.data._coldefs._arrays[1][0] == 500 assert tbhdu1.data._coldefs.columns[1].array[0] == 500 assert tbhdu1.columns._arrays[1][0] == 500 assert tbhdu1.columns.columns[1].array[0] == 500 hdu.columns._arrays[1][0] = 600 assert hdu.data[0][1] == 600 assert hdu.data._coldefs._arrays[1][0] == 600 assert hdu.data._coldefs.columns[1].array[0] == 600 assert hdu.columns._arrays[1][0] == 600 assert hdu.columns.columns[1].array[0] == 600 assert tbhdu1.data[0][1] == 600 assert tbhdu1.data._coldefs._arrays[1][0] == 600 assert tbhdu1.data._coldefs.columns[1].array[0] == 600 assert tbhdu1.columns._arrays[1][0] == 600 assert tbhdu1.columns.columns[1].array[0] == 600 hdu.columns.columns[1].array[0] = 800 assert hdu.data[0][1] == 800 assert hdu.data._coldefs._arrays[1][0] == 800 assert hdu.data._coldefs.columns[1].array[0] == 800 assert hdu.columns._arrays[1][0] == 800 assert hdu.columns.columns[1].array[0] == 800 assert tbhdu1.data[0][1] == 800 assert tbhdu1.data._coldefs._arrays[1][0] == 800 assert tbhdu1.data._coldefs.columns[1].array[0] == 800 assert tbhdu1.columns._arrays[1][0] == 800 assert tbhdu1.columns.columns[1].array[0] == 800 def test_constructor_name_arg(self): """testConstructorNameArg Passing name='...' 
to the BinTableHDU and TableHDU constructors should set the .name attribute and 'EXTNAME' header keyword, and override any name in an existing 'EXTNAME' value. """ for hducls in [fits.BinTableHDU, fits.TableHDU]: # First test some default assumptions hdu = hducls() assert hdu.name == "" assert "EXTNAME" not in hdu.header hdu.name = "FOO" assert hdu.name == "FOO" assert hdu.header["EXTNAME"] == "FOO" # Passing name to constructor hdu = hducls(name="FOO") assert hdu.name == "FOO" assert hdu.header["EXTNAME"] == "FOO" # And overriding a header with a different extname hdr = fits.Header() hdr["EXTNAME"] = "EVENTS" hdu = hducls(header=hdr, name="FOO") assert hdu.name == "FOO" assert hdu.header["EXTNAME"] == "FOO" def test_constructor_ver_arg(self): for hducls in [fits.BinTableHDU, fits.TableHDU]: # First test some default assumptions hdu = hducls() assert hdu.ver == 1 assert "EXTVER" not in hdu.header hdu.ver = 2 assert hdu.ver == 2 assert hdu.header["EXTVER"] == 2 # Passing name to constructor hdu = hducls(ver=3) assert hdu.ver == 3 assert hdu.header["EXTVER"] == 3 # And overriding a header with a different extver hdr = fits.Header() hdr["EXTVER"] = 4 hdu = hducls(header=hdr, ver=5) assert hdu.ver == 5 assert hdu.header["EXTVER"] == 5 def test_unicode_colname(self): """ Regression test for https://github.com/astropy/astropy/issues/5204 "Handle unicode FITS BinTable column names on Python 2" """ col = fits.Column(name="spam", format="E", array=[42.0]) # This used to raise a TypeError, now it works fits.BinTableHDU.from_columns([col]) def test_bin_table_with_logical_array(self): c1 = fits.Column(name="flag", format="2L", array=[[True, False], [False, True]]) coldefs = fits.ColDefs([c1]) tbhdu1 = fits.BinTableHDU.from_columns(coldefs) assert ( tbhdu1.data.field("flag")[0] == np.array([True, False], dtype=bool) ).all() assert ( tbhdu1.data.field("flag")[1] == np.array([False, True], dtype=bool) ).all() tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data) assert ( tbhdu.data.field("flag")[0] == np.array([True, False], dtype=bool) ).all() assert ( tbhdu.data.field("flag")[1] == np.array([False, True], dtype=bool) ).all() def test_fits_rec_column_access(self): tbdata = fits.getdata(self.data("table.fits")) assert (tbdata.V_mag == tbdata.field("V_mag")).all() assert (tbdata.V_mag == tbdata["V_mag"]).all() # Table with scaling (c3) and tnull (c1) tbdata = fits.getdata(self.data("tb.fits")) for col in ("c1", "c2", "c3", "c4"): data = getattr(tbdata, col) assert (data == tbdata.field(col)).all() assert (data == tbdata[col]).all() # ascii table tbdata = fits.getdata(self.data("ascii.fits")) for col in ("a", "b"): data = getattr(tbdata, col) assert (data == tbdata.field(col)).all() assert (data == tbdata[col]).all() # with VLA column col1 = fits.Column( name="x", format="PI()", array=np.array([[45, 56], [11, 12, 13]], dtype=np.object_), ) hdu = fits.BinTableHDU.from_columns([col1]) assert type(hdu.data["x"]) == type(hdu.data.x) assert (hdu.data["x"][0] == hdu.data.x[0]).all() assert (hdu.data["x"][1] == hdu.data.x[1]).all() def test_table_with_zero_width_column(self): hdul = fits.open(self.data("zerowidth.fits")) tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM' assert "ORBPARM" in tbhdu.columns.names # The ORBPARM column should not be in the data, though the data should # be readable assert "ORBPARM" in tbhdu.data.names assert "ORBPARM" in tbhdu.data.dtype.names # Verify that some of the data columns are still correctly accessible # by name assert tbhdu.data[0]["ANNAME"] == "VLA:_W16" assert 
comparefloats( tbhdu.data[0]["STABXYZ"], np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64), ) assert tbhdu.data[0]["NOSTA"] == 1 assert tbhdu.data[0]["MNTSTA"] == 0 assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT" assert comparefloats( tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64) ) assert tbhdu.data[-1]["NOSTA"] == 29 assert tbhdu.data[-1]["MNTSTA"] == 0 hdul.writeto(self.temp("newtable.fits")) hdul.close() hdul = fits.open(self.temp("newtable.fits")) tbhdu = hdul[2] # Verify that the previous tests still hold after writing assert "ORBPARM" in tbhdu.columns.names assert "ORBPARM" in tbhdu.data.names assert "ORBPARM" in tbhdu.data.dtype.names assert tbhdu.data[0]["ANNAME"] == "VLA:_W16" assert comparefloats( tbhdu.data[0]["STABXYZ"], np.array([499.85566663, -1317.99231554, -735.18866164], dtype=np.float64), ) assert tbhdu.data[0]["NOSTA"] == 1 assert tbhdu.data[0]["MNTSTA"] == 0 assert tbhdu.data[-1]["ANNAME"] == "VPT:_OUT" assert comparefloats( tbhdu.data[-1]["STABXYZ"], np.array([0.0, 0.0, 0.0], dtype=np.float64) ) assert tbhdu.data[-1]["NOSTA"] == 29 assert tbhdu.data[-1]["MNTSTA"] == 0 hdul.close() def test_string_column_padding(self): a = ["img1", "img2", "img3a", "p"] s = ( "img1\x00\x00\x00\x00\x00\x00" "img2\x00\x00\x00\x00\x00\x00" "img3a\x00\x00\x00\x00\x00" "p\x00\x00\x00\x00\x00\x00\x00\x00\x00" ) acol = fits.Column(name="MEMNAME", format="A10", array=chararray.array(a)) ahdu = fits.BinTableHDU.from_columns([acol]) assert ahdu.data.tobytes().decode("raw-unicode-escape") == s ahdu.writeto(self.temp("newtable.fits")) with fits.open(self.temp("newtable.fits")) as hdul: assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s assert (hdul[1].data["MEMNAME"] == a).all() del hdul ahdu = fits.TableHDU.from_columns([acol]) ahdu.writeto(self.temp("newtable.fits"), overwrite=True) with fits.open(self.temp("newtable.fits")) as hdul: assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s.replace( "\x00", " " ) assert (hdul[1].data["MEMNAME"] == a).all() ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy()) del hdul # Now serialize once more as a binary table; padding bytes should # revert to zeroes ahdu.writeto(self.temp("newtable.fits"), overwrite=True) with fits.open(self.temp("newtable.fits")) as hdul: assert hdul[1].data.tobytes().decode("raw-unicode-escape") == s assert (hdul[1].data["MEMNAME"] == a).all() def test_multi_dimensional_columns(self): """ Tests the multidimensional column implementation with both numeric arrays and string arrays. 
""" data = np.rec.array( [ ([0, 1, 2, 3, 4, 5], "row1" * 2), ([6, 7, 8, 9, 0, 1], "row2" * 2), ([2, 3, 4, 5, 6, 7], "row3" * 2), ], formats="6i4,a8", ) thdu = fits.BinTableHDU.from_columns(data) thdu.writeto(self.temp("newtable.fits")) with fits.open(self.temp("newtable.fits"), mode="update") as hdul: # Modify the TDIM fields to my own specification hdul[1].header["TDIM1"] = "(2,3)" hdul[1].header["TDIM2"] = "(4,2)" with fits.open(self.temp("newtable.fits")) as hdul: thdu = hdul[1] c1 = thdu.data.field(0) c2 = thdu.data.field(1) assert c1.shape == (3, 3, 2) assert c2.shape == (3, 2) assert ( c1 == np.array( [ [[0, 1], [2, 3], [4, 5]], [[6, 7], [8, 9], [0, 1]], [[2, 3], [4, 5], [6, 7]], ] ) ).all() assert ( c2 == np.array([["row1", "row1"], ["row2", "row2"], ["row3", "row3"]]) ).all() del c1 del c2 del thdu del hdul # Test setting the TDIMn header based on the column data data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", 4)]) data["x"] = 1, 2, 3 data["s"] = "ok" fits.writeto(self.temp("newtable.fits"), data, overwrite=True) t = fits.getdata(self.temp("newtable.fits")) assert t.field(1).dtype.str[-1] == "5" assert t.field(1).shape == (3, 4) # Like the previous test, but with an extra dimension (a bit more # complicated) data = np.zeros(3, dtype=[("x", "f4"), ("s", "S5", (4, 3))]) data["x"] = 1, 2, 3 data["s"] = "ok" del t fits.writeto(self.temp("newtable.fits"), data, overwrite=True) t = fits.getdata(self.temp("newtable.fits")) assert t.field(1).dtype.str[-1] == "5" assert t.field(1).shape == (3, 4, 3) def test_oned_array_single_element(self): # a table with rows that are 1d arrays of a single value data = np.array([(1,), (2,)], dtype=([("x", "i4", (1,))])) thdu = fits.BinTableHDU.from_columns(data) thdu.writeto(self.temp("onedtable.fits")) with fits.open(self.temp("onedtable.fits")) as hdul: thdu = hdul[1] c = thdu.data.field(0) assert c.shape == (2, 1) assert thdu.header["TDIM1"] == "(1)" def test_bin_table_init_from_string_array_column(self): """ Tests two ways of creating a new `BinTableHDU` from a column of string arrays. This tests for a couple different regressions, and ensures that both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work equivalently. Some of this is redundant with the following test, but checks some subtly different cases. """ data = [[b"abcd", b"efgh"], [b"ijkl", b"mnop"], [b"qrst", b"uvwx"]] arr = np.array( [(data,), (data,), (data,), (data,), (data,)], dtype=[("S", "(3, 2)S4")] ) tbhdu1 = fits.BinTableHDU(data=arr) def test_dims_and_roundtrip(tbhdu): assert tbhdu.data["S"].shape == (5, 3, 2) assert tbhdu.data["S"].dtype.str.endswith("U4") tbhdu.writeto(self.temp("test.fits"), overwrite=True) with fits.open(self.temp("test.fits")) as hdul: tbhdu2 = hdul[1] assert tbhdu2.header["TDIM1"] == "(4,2,3)" assert tbhdu2.data["S"].shape == (5, 3, 2) assert tbhdu.data["S"].dtype.str.endswith("U4") assert np.all(tbhdu2.data["S"] == tbhdu.data["S"]) test_dims_and_roundtrip(tbhdu1) tbhdu2 = fits.BinTableHDU.from_columns(arr) test_dims_and_roundtrip(tbhdu2) def test_columns_with_truncating_tdim(self): """ According to the FITS standard (section 7.3.2): If the number of elements in the array implied by the TDIMn is less than the allocated size of the ar- ray in the FITS file, then the unused trailing elements should be interpreted as containing undefined fill values. 
*deep sigh* What this means is if a column has a repeat count larger than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)', but TFORM1 = 6I), then instead of this being an outright error we are to take the first 4 elements as implied by the TDIM and ignore the additional two trailing elements. """ # It's hard to even successfully create a table like this. I think # it *should* be difficult, but once created it should at least be # possible to read. arr1 = [[b"ab", b"cd"], [b"ef", b"gh"], [b"ij", b"kl"]] arr2 = [1, 2, 3, 4, 5] arr = np.array( [(arr1, arr2), (arr1, arr2)], dtype=[("a", "(3, 2)S2"), ("b", "5i8")] ) tbhdu = fits.BinTableHDU(data=arr) tbhdu.writeto(self.temp("test.fits")) with open(self.temp("test.fits"), "rb") as f: raw_bytes = f.read() # Artificially truncate TDIM in the header; this seems to be the # easiest way to do this while getting around Astropy's insistence on the # data and header matching perfectly; again, we have no interest in # making it possible to write files in this format, only read them with open(self.temp("test.fits"), "wb") as f: f.write(raw_bytes.replace(b"(2,2,3)", b"(2,2,2)")) with fits.open(self.temp("test.fits")) as hdul: tbhdu2 = hdul[1] assert tbhdu2.header["TDIM1"] == "(2,2,2)" assert tbhdu2.header["TFORM1"] == "12A" for row in tbhdu2.data: assert np.all(row["a"] == [["ab", "cd"], ["ef", "gh"]]) assert np.all(row["b"] == [1, 2, 3, 4, 5]) def test_string_array_round_trip(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201""" data = [["abc", "def", "ghi"], ["jkl", "mno", "pqr"], ["stu", "vwx", "yz "]] recarr = np.rec.array([(data,), (data,)], formats=["(3,3)S3"]) t = fits.BinTableHDU(data=recarr) t.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as h: assert "TDIM1" in h[1].header assert h[1].header["TDIM1"] == "(3,3,3)" assert len(h[1].data) == 2 assert len(h[1].data[0]) == 1 assert ( h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii") ).all() with fits.open(self.temp("test.fits")) as h: # Access the data; I think this is necessary to exhibit the bug # reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201 h[1].data[:] h.writeto(self.temp("test2.fits")) with fits.open(self.temp("test2.fits")) as h: assert "TDIM1" in h[1].header assert h[1].header["TDIM1"] == "(3,3,3)" assert len(h[1].data) == 2 assert len(h[1].data[0]) == 1 assert ( h[1].data.field(0)[0] == np.char.decode(recarr.field(0)[0], "ascii") ).all() def test_new_table_with_nd_column(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/3 """ arra = np.array(["a", "b"], dtype="|S1") arrb = np.array([["a", "bc"], ["cd", "e"]], dtype="|S2") arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) cols = [ fits.Column(name="str", format="1A", array=arra), fits.Column(name="strarray", format="4A", dim="(2,2)", array=arrb), fits.Column(name="intarray", format="4I", dim="(2, 2)", array=arrc), ] hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols)) hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as h: # Need to force string arrays to byte arrays in order to compare # correctly on Python 3 assert (h[1].data["str"].encode("ascii") == arra).all() assert (h[1].data["strarray"].encode("ascii") == arrb).all() assert (h[1].data["intarray"] == arrc).all() def test_mismatched_tform_and_tdim(self): """Normally the product of the dimensions listed in a TDIMn keyword must be less than or equal to the repeat count in the TFORMn keyword. 
This tests that this works if less than (treating the trailing bytes as unspecified fill values per the FITS standard) and fails if the dimensions specified by TDIMn are greater than the repeat count. """ arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]]) cols = [ fits.Column(name="a", format="20I", dim="(2,2)", array=arra), fits.Column(name="b", format="4I", dim="(2,2)", array=arrb), ] # The first column has the mismatched repeat count hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols)) hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as h: assert h[1].header["TFORM1"] == "20I" assert h[1].header["TFORM2"] == "4I" assert h[1].header["TDIM1"] == h[1].header["TDIM2"] == "(2,2)" assert (h[1].data["a"] == arra).all() assert (h[1].data["b"] == arrb).all() assert h[1].data.itemsize == 48 # 16-bits times 24 # If dims is more than the repeat count in the format specifier raise # an error pytest.raises( VerifyError, fits.Column, name="a", format="2I", dim="(2,2)", array=arra ) def test_tdim_of_size_one(self): """Regression test for https://github.com/astropy/astropy/pull/3580""" with fits.open(self.data("tdim.fits")) as hdulist: assert hdulist[1].data["V_mag"].shape == (3, 1, 1) def test_slicing(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52""" with fits.open(self.data("table.fits")) as f: data = f[1].data targets = data.field("target") s = data[:] assert (s.field("target") == targets).all() for n in range(len(targets) + 2): s = data[:n] assert (s.field("target") == targets[:n]).all() s = data[n:] assert (s.field("target") == targets[n:]).all() s = data[::2] assert (s.field("target") == targets[::2]).all() s = data[::-1] assert (s.field("target") == targets[::-1]).all() def test_array_slicing(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55""" with fits.open(self.data("table.fits")) as f: data = f[1].data s1 = data[data["target"] == "NGC1001"] s2 = data[np.where(data["target"] == "NGC1001")] s3 = data[[0]] s4 = data[:1] for s in [s1, s2, s3, s4]: assert isinstance(s, fits.FITS_rec) assert comparerecords(s1, s2) assert comparerecords(s2, s3) assert comparerecords(s3, s4) def test_array_broadcasting(self): """ Regression test for https://github.com/spacetelescope/PyFITS/pull/48 """ with fits.open(self.data("table.fits")) as hdu: data = hdu[1].data data["V_mag"] = 0 assert np.all(data["V_mag"] == 0) data["V_mag"] = 1 assert np.all(data["V_mag"] == 1) for container in (list, tuple, np.array): data["V_mag"] = container([1, 2, 3]) assert np.array_equal(data["V_mag"], np.array([1, 2, 3])) def test_array_slicing_readonly(self): """ Like test_array_slicing but with the file opened in 'readonly' mode. Regression test for a crash when slicing readonly memmap'd tables. """ with fits.open(self.data("table.fits"), mode="readonly") as f: data = f[1].data s1 = data[data["target"] == "NGC1001"] s2 = data[np.where(data["target"] == "NGC1001")] s3 = data[[0]] s4 = data[:1] for s in [s1, s2, s3, s4]: assert isinstance(s, fits.FITS_rec) assert comparerecords(s1, s2) assert comparerecords(s2, s3) assert comparerecords(s3, s4) @pytest.mark.parametrize("tablename", ["table.fits", "tb.fits"]) def test_dump_load_round_trip(self, tablename): """ A simple test of the dump/load methods; dump the data, column, and header files and try to reload the table from them. 
""" with fits.open(self.data(tablename)) as hdul: tbhdu = hdul[1] datafile = self.temp("data.txt") cdfile = self.temp("coldefs.txt") hfile = self.temp("header.txt") tbhdu.dump(datafile, cdfile, hfile) new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile) assert comparerecords(tbhdu.data, new_tbhdu.data) _assert_attr_col(new_tbhdu, hdul[1]) def test_dump_load_array_colums(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/22 Ensures that a table containing a multi-value array column can be dumped and loaded successfully. """ data = np.rec.array( [("a", [1, 2, 3, 4], 0.1), ("b", [5, 6, 7, 8], 0.2)], formats="a1,4i4,f8" ) tbhdu = fits.BinTableHDU.from_columns(data) datafile = self.temp("data.txt") cdfile = self.temp("coldefs.txt") hfile = self.temp("header.txt") tbhdu.dump(datafile, cdfile, hfile) new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile) assert comparerecords(tbhdu.data, new_tbhdu.data) assert str(tbhdu.header) == str(new_tbhdu.header) def test_load_guess_format(self): """ Tests loading a table dump with no supplied coldefs or header, so that the table format has to be guessed at. There is of course no exact science to this; the table that's produced simply uses sensible guesses for that format. Ideally this should never have to be used. """ # Create a table containing a variety of data types. a0 = np.array([False, True, False], dtype=bool) c0 = fits.Column(name="c0", format="L", array=a0) # Format X currently not supported by the format # a1 = np.array([[0], [1], [0]], dtype=np.uint8) # c1 = fits.Column(name='c1', format='X', array=a1) a2 = np.array([1, 128, 255], dtype=np.uint8) c2 = fits.Column(name="c2", format="B", array=a2) a3 = np.array([-30000, 1, 256], dtype=np.int16) c3 = fits.Column(name="c3", format="I", array=a3) a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32) c4 = fits.Column(name="c4", format="J", array=a4) a5 = np.array(["a", "abc", "ab"]) c5 = fits.Column(name="c5", format="A3", array=a5) a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64) c6 = fits.Column(name="c6", format="D", array=a6) a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j], dtype=np.complex128) c7 = fits.Column(name="c7", format="M", array=a7) a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32) c8 = fits.Column(name="c8", format="PJ()", array=a8) tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8]) datafile = self.temp("data.txt") tbhdu.dump(datafile) new_tbhdu = fits.BinTableHDU.load(datafile) # In this particular case the record data at least should be equivalent assert comparerecords(tbhdu.data, new_tbhdu.data) def test_attribute_field_shadowing(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86 Numpy recarray objects have a poorly-considered feature of allowing field access by attribute lookup. However, if a field name coincides with an existing attribute/method of the array, the existing name takes presence (making the attribute-based field lookup completely unreliable in general cases). This ensures that any FITS_rec attributes still work correctly even when there is a field with the same name as that attribute. 
""" c1 = fits.Column(name="names", format="I", array=[1]) c2 = fits.Column(name="formats", format="I", array=[2]) c3 = fits.Column(name="other", format="I", array=[3]) t = fits.BinTableHDU.from_columns([c1, c2, c3]) assert t.data.names == ["names", "formats", "other"] assert t.data.formats == ["I"] * 3 assert (t.data["names"] == [1]).all() assert (t.data["formats"] == [2]).all() assert (t.data.other == [3]).all() def test_table_from_bool_fields(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113 Tests creating a table from a recarray containing numpy.bool columns. """ array = np.rec.array([(True, False), (False, True)], formats="|b1,|b1") thdu = fits.BinTableHDU.from_columns(array) assert thdu.columns.formats == ["L", "L"] assert comparerecords(thdu.data, array) # Test round trip thdu.writeto(self.temp("table.fits")) data = fits.getdata(self.temp("table.fits"), ext=1) assert thdu.columns.formats == ["L", "L"] assert comparerecords(data, array) def test_table_from_bool_fields2(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215 Tests the case where a multi-field ndarray (not a recarray) containing a bool field is used to initialize a `BinTableHDU`. """ arr = np.array([(False,), (True,), (False,)], dtype=[("a", "?")]) hdu = fits.BinTableHDU(data=arr) assert (hdu.data["a"] == arr["a"]).all() def test_bool_column_update(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139""" c1 = fits.Column("F1", "L", array=[True, False]) c2 = fits.Column("F2", "L", array=[False, True]) thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2])) thdu.writeto(self.temp("table.fits")) with fits.open(self.temp("table.fits"), mode="update") as hdul: hdul[1].data["F1"][1] = True hdul[1].data["F2"][0] = True with fits.open(self.temp("table.fits")) as hdul: assert (hdul[1].data["F1"] == [True, True]).all() assert (hdul[1].data["F2"] == [True, True]).all() def test_missing_tnull(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197""" c = fits.Column( "F1", "A3", null="---", array=np.array(["1.0", "2.0", "---", "3.0"]), ascii=True, ) table = fits.TableHDU.from_columns([c]) table.writeto(self.temp("test.fits")) # Now let's delete the TNULL1 keyword, making this essentially # unreadable with fits.open(self.temp("test.fits"), mode="update") as h: h[1].header["TFORM1"] = "E3" del h[1].header["TNULL1"] with fits.open(self.temp("test.fits")) as h: pytest.raises(ValueError, lambda: h[1].data["F1"]) try: with fits.open(self.temp("test.fits")) as h: h[1].data["F1"] except ValueError as e: assert str(e).endswith( "the header may be missing the necessary TNULL1 " "keyword or the table contains invalid data" ) def test_blank_field_zero(self): """Regression test for https://github.com/astropy/astropy/issues/5134 Blank values in numerical columns of ASCII tables should be replaced with zeros, so they can be loaded into numpy arrays. When a TNULL value is set and there are blank fields not equal to that value, they should be replaced with zeros. """ # Test an integer column with blank string as null nullval1 = " " c1 = fits.Column( "F1", format="I8", null=nullval1, array=np.array([0, 1, 2, 3, 4]), ascii=True, ) table = fits.TableHDU.from_columns([c1]) table.writeto(self.temp("ascii_null.fits")) # Replace the 1st col, 3rd row, with a null field. 
with open(self.temp("ascii_null.fits"), mode="r+") as h: nulled = h.read().replace("2 ", " ") h.seek(0) h.write(nulled) with fits.open(self.temp("ascii_null.fits"), memmap=True) as f: assert f[1].data[2][0] == 0 # Test a float column with a null value set and blank fields. nullval2 = "NaN" c2 = fits.Column( "F1", format="F12.8", null=nullval2, array=np.array([1.0, 2.0, 3.0, 4.0]), ascii=True, ) table = fits.TableHDU.from_columns([c2]) table.writeto(self.temp("ascii_null2.fits")) # Replace the 1st col, 3rd row, with a null field. with open(self.temp("ascii_null2.fits"), mode="r+") as h: nulled = h.read().replace("3.00000000", " ") h.seek(0) h.write(nulled) with fits.open(self.temp("ascii_null2.fits"), memmap=True) as f: # (Currently it should evaluate to 0.0, but if a TODO in fitsrec is # completed, then it should evaluate to NaN.) assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0]) def test_column_array_type_mismatch(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218""" arr = [-99] * 20 col = fits.Column("mag", format="E", array=arr) assert (arr == col.array).all() def test_table_none(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/27 """ with fits.open(self.data("tb.fits")) as h: h[1].data h[1].data = None assert isinstance(h[1].data, fits.FITS_rec) assert len(h[1].data) == 0 h[1].writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as h: assert h[1].header["NAXIS"] == 2 assert h[1].header["NAXIS1"] == 12 assert h[1].header["NAXIS2"] == 0 assert isinstance(h[1].data, fits.FITS_rec) assert len(h[1].data) == 0 def test_unncessary_table_load(self): """Test unnecessary parsing and processing of FITS tables when writing directly from one FITS file to a new file without first reading the data for user manipulation. In other words, it should be possible to do a direct copy of the raw data without unnecessary processing of the data. """ with fits.open(self.data("table.fits")) as h: h[1].writeto(self.temp("test.fits")) # Since this was a direct copy the h[1].data attribute should not have # even been accessed (since this means the data was read and parsed) assert "data" not in h[1].__dict__ with fits.open(self.data("table.fits")) as h1: with fits.open(self.temp("test.fits")) as h2: assert str(h1[1].header) == str(h2[1].header) assert comparerecords(h1[1].data, h2[1].data) def test_table_from_columns_of_other_table(self): """Tests a rare corner case where the columns of an existing table are used to create a new table with the new_table function. In this specific case, however, the existing table's data has not been read yet, so new_table has to get at it through the Delayed proxy. Note: Although this previously tested new_table it now uses BinTableHDU.from_columns directly, around which new_table is a mere wrapper. """ hdul = fits.open(self.data("table.fits")) # Make sure the column array is in fact delayed... assert isinstance(hdul[1].columns._arrays[0], Delayed) # Create a new table... t = fits.BinTableHDU.from_columns(hdul[1].columns) # The original columns should no longer be delayed... 
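        # (Creating the new table reads the column data from the file, so the
        # Delayed placeholders should be replaced by real arrays.)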
assert not isinstance(hdul[1].columns._arrays[0], Delayed) t.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul2: assert comparerecords(hdul[1].data, hdul2[1].data) hdul.close() def test_bintable_to_asciitable(self): """Tests initializing a TableHDU with the data from a BinTableHDU.""" with fits.open(self.data("tb.fits")) as hdul: tbdata = hdul[1].data tbhdu = fits.TableHDU(data=tbdata) tbhdu.writeto(self.temp("test.fits"), overwrite=True) with fits.open(self.temp("test.fits")) as hdul2: tbdata2 = hdul2[1].data assert np.all(tbdata["c1"] == tbdata2["c1"]) assert np.all(tbdata["c2"] == tbdata2["c2"]) # c3 gets converted from float32 to float64 when writing # test.fits, so cast to float32 before testing that the correct # value is retrieved assert np.all( tbdata["c3"].astype(np.float32) == tbdata2["c3"].astype(np.float32) ) # c4 is a boolean column in the original table; we want ASCII # columns to convert these to columns of 'T'/'F' strings assert np.all(np.where(tbdata["c4"], "T", "F") == tbdata2["c4"]) def test_pickle(self): """ Regression test for https://github.com/astropy/astropy/issues/1597 Tests for pickling FITS_rec objects """ # open existing FITS tables (images pickle by default, no test needed): with fits.open(self.data("tb.fits")) as btb: # Test column array is delayed and can pickle assert isinstance(btb[1].columns._arrays[0], Delayed) btb_pd = pickle.dumps(btb[1].data) btb_pl = pickle.loads(btb_pd) # It should not be delayed any more assert not isinstance(btb[1].columns._arrays[0], Delayed) assert comparerecords(btb_pl, btb[1].data) with fits.open(self.data("ascii.fits")) as asc: asc_pd = pickle.dumps(asc[1].data) asc_pl = pickle.loads(asc_pd) assert comparerecords(asc_pl, asc[1].data) with fits.open(self.data("random_groups.fits")) as rgr: rgr_pd = pickle.dumps(rgr[0].data) rgr_pl = pickle.loads(rgr_pd) assert comparerecords(rgr_pl, rgr[0].data) with fits.open(self.data("zerowidth.fits")) as zwc: # Doesn't pickle zero-width (_phanotm) column 'ORBPARM' zwc_pd = pickle.dumps(zwc[2].data) zwc_pl = pickle.loads(zwc_pd) with pytest.warns(UserWarning, match="Field 2 has a repeat count of 0"): assert comparerecords(zwc_pl, zwc[2].data) def test_zero_length_table(self): array = np.array([], dtype=[("a", "i8"), ("b", "S64"), ("c", ("i4", (3, 2)))]) hdu = fits.BinTableHDU(array) assert hdu.header["NAXIS1"] == 96 assert hdu.header["NAXIS2"] == 0 assert hdu.header["TDIM3"] == "(2,3)" field = hdu.data.field(1) assert field.shape == (0,) def test_dim_column_byte_order_mismatch(self): """ When creating a table column with non-trivial TDIMn, and big-endian array data read from an existing FITS file, the data should not be unnecessarily byteswapped. Regression test for https://github.com/astropy/astropy/issues/3561 """ data = fits.getdata(self.data("random_groups.fits"))["DATA"] col = fits.Column(name="TEST", array=data, dim="(3,1,128,1,1)", format="1152E") thdu = fits.BinTableHDU.from_columns([col]) thdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul: assert np.all(hdul[1].data["TEST"] == data) def test_fits_rec_from_existing(self): """ Tests creating a `FITS_rec` object with `FITS_rec.from_columns` from an existing `FITS_rec` object read from a FITS file. This ensures that the per-column arrays are updated properly. 
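        Calling ``FITS_rec.from_columns`` with a larger ``nrows`` pre-allocates
        the extra rows so that records from a second table can be copied into
        them afterwards.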
Regression test for https://github.com/spacetelescope/PyFITS/issues/99 """ # The use case that revealed this problem was trying to create a new # table from an existing table, but with additional rows so that we can # append data from a second table (with the same column structure) data1 = fits.getdata(self.data("tb.fits")) data2 = fits.getdata(self.data("tb.fits")) nrows = len(data1) + len(data2) merged = fits.FITS_rec.from_columns(data1, nrows=nrows) merged[len(data1) :] = data2 mask = merged["c1"] > 1 masked = merged[mask] # The test table only has two rows, only the second of which is > 1 for # the 'c1' column assert comparerecords(data1[1:], masked[:1]) assert comparerecords(data1[1:], masked[1:]) # Double check that the original data1 table hasn't been affected by # its use in creating the "merged" table assert comparerecords(data1, fits.getdata(self.data("tb.fits"))) def test_update_string_column_inplace(self): """ Regression test for https://github.com/astropy/astropy/issues/4452 Ensure that changes to values in a string column are saved when a file is opened in ``mode='update'``. """ data = np.array([("abc",)], dtype=[("a", "S3")]) fits.writeto(self.temp("test.fits"), data) with fits.open(self.temp("test.fits"), mode="update") as hdul: hdul[1].data["a"][0] = "XYZ" assert hdul[1].data["a"][0] == "XYZ" with fits.open(self.temp("test.fits")) as hdul: assert hdul[1].data["a"][0] == "XYZ" # Test update but with a non-trivial TDIMn data = np.array( [([["abc", "def", "geh"], ["ijk", "lmn", "opq"]],)], dtype=[("a", ("S3", (2, 3)))], ) fits.writeto(self.temp("test2.fits"), data) expected = [["abc", "def", "geh"], ["ijk", "XYZ", "opq"]] with fits.open(self.temp("test2.fits"), mode="update") as hdul: assert hdul[1].header["TDIM1"] == "(3,3,2)" # Note: Previously I wrote data['a'][0][1, 1] to address # the single row. However, this is broken for chararray because # data['a'][0] does *not* return a view of the original array--this # is a bug in chararray though and not a bug in any FITS-specific # code so we'll roll with it for now... # (by the way the bug in question is fixed in newer Numpy versions) hdul[1].data["a"][0, 1, 1] = "XYZ" assert np.all(hdul[1].data["a"][0] == expected) with fits.open(self.temp("test2.fits")) as hdul: assert hdul[1].header["TDIM1"] == "(3,3,2)" assert np.all(hdul[1].data["a"][0] == expected) @pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph") def test_reference_leak(self): """Regression test for https://github.com/astropy/astropy/pull/520""" def readfile(filename): with fits.open(filename) as hdul: data = hdul[1].data.copy() for colname in data.dtype.names: data[colname] with _refcounting("FITS_rec"): readfile(self.data("memtest.fits")) @pytest.mark.skipif(not HAVE_OBJGRAPH, reason="requires objgraph") @pytest.mark.slow def test_reference_leak2(self, tmp_path): """ Regression test for https://github.com/astropy/astropy/pull/4539 This actually re-runs a small set of tests that I found, during careful testing, exhibited the reference leaks fixed by #4539, but now with reference counting around each test to ensure that the leaks are fixed. 
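        Each test body runs inside the ``_refcounting('FITS_rec')`` context
        manager defined in this module, which fails if more FITS_rec instances
        are alive after the block than before it.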
""" from .test_connect import TestMultipleHDU from .test_core import TestCore t1 = TestCore() t1.setup_method() try: with _refcounting("FITS_rec"): t1.test_add_del_columns2() finally: t1.teardown_method() del t1 t2 = self.__class__() for test_name in [ "test_recarray_to_bintablehdu", "test_numpy_ndarray_to_bintablehdu", "test_new_table_from_recarray", "test_new_fitsrec", ]: t2.setup_method() try: with _refcounting("FITS_rec"): getattr(t2, test_name)() finally: t2.teardown_method() del t2 t3 = TestMultipleHDU() t3.setup_class() try: with _refcounting("FITS_rec"): t3.test_read(tmp_path) finally: t3.teardown_class() del t3 def test_dump_overwrite(self): with fits.open(self.data("table.fits")) as hdul: tbhdu = hdul[1] datafile = self.temp("data.txt") cdfile = self.temp("coldefs.txt") hfile = self.temp("header.txt") tbhdu.dump(datafile, cdfile, hfile) msg = ( r"File .* already exists\. File .* already exists\. File " r".* already exists\. If you mean to replace the " r"file\(s\) then use the argument 'overwrite=True'\." ) with pytest.raises(OSError, match=msg): tbhdu.dump(datafile, cdfile, hfile) tbhdu.dump(datafile, cdfile, hfile, overwrite=True) def test_pseudo_unsigned_ints(self): """ Tests updating a table column containing pseudo-unsigned ints. """ data = np.array([1, 2, 3], dtype=np.uint32) col = fits.Column(name="A", format="1J", bzero=2**31, array=data) thdu = fits.BinTableHDU.from_columns([col]) thdu.writeto(self.temp("test.fits")) # Test that the file wrote out correctly with fits.open(self.temp("test.fits"), uint=True) as hdul: hdu = hdul[1] assert "TZERO1" in hdu.header assert hdu.header["TZERO1"] == 2**31 assert hdu.data["A"].dtype == np.dtype("uint32") assert np.all(hdu.data["A"] == data) # Test updating the unsigned int data hdu.data["A"][0] = 99 hdu.writeto(self.temp("test2.fits")) with fits.open(self.temp("test2.fits"), uint=True) as hdul: hdu = hdul[1] assert "TZERO1" in hdu.header assert hdu.header["TZERO1"] == 2**31 assert hdu.data["A"].dtype == np.dtype("uint32") assert np.all(hdu.data["A"] == [99, 2, 3]) def test_column_with_scaling(self): """Check that a scaled column if correctly saved once it is modified. Regression test for https://github.com/astropy/astropy/issues/6887 """ c1 = fits.Column( name="c1", array=np.array([1], dtype=">i2"), format="1I", bscale=1, bzero=32768, ) S = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([c1])]) # Change value in memory S[1].data["c1"][0] = 2 S.writeto(self.temp("a.fits")) assert S[1].data["c1"] == 2 # Read and change value in memory with fits.open(self.temp("a.fits")) as X: X[1].data["c1"][0] = 10 assert X[1].data["c1"][0] == 10 # Write back to file X.writeto(self.temp("b.fits")) # Now check the file with fits.open(self.temp("b.fits")) as hdul: assert hdul[1].data["c1"][0] == 10 def test_ascii_inttypes(self): """ Test correct integer dtypes according to ASCII table field widths. 
Regression for https://github.com/astropy/astropy/issues/9899 """ i08 = np.array([2**3, 2**23, -(2**22), 10, 2**23], dtype="i4") i10 = np.array([2**8, 2**31 - 1, -(2**29), 30, 2**31 - 1], dtype="i8") i20 = np.array([2**16, 2**63 - 1, -(2**63), 40, 2**63 - 1], dtype="i8") i02 = np.array([2**8, 2**13, -(2**9), 50, 2**13], dtype="i2") t0 = Table([i08, i08 * 2, i10, i20, i02]) t1 = Table.read(self.data("ascii_i4-i20.fits")) assert t1.dtype == t0.dtype assert comparerecords(t1, t0) def test_ascii_floattypes(self): """Test different float formats.""" col1 = fits.Column( name="a", format="D", array=np.array([11.1, 12.2]), ascii=True ) col2 = fits.Column( name="b", format="D16", array=np.array([15.5, 16.6]), ascii=True ) col3 = fits.Column( name="c", format="D16.7", array=np.array([1.1, 2.2]), ascii=True ) hdu = fits.TableHDU.from_columns([col1, col2, col3]) hdu.writeto(self.temp("foo.fits")) with fits.open(self.temp("foo.fits"), memmap=False) as hdul: assert comparerecords(hdul[1].data, hdu.data) @contextlib.contextmanager def _refcounting(type_): """ Perform the body of a with statement with reference counting for the given type (given by class name)--raises an assertion error if there are more unfreed objects of the given type than when we entered the with statement. """ gc.collect() refcount = len(objgraph.by_type(type_)) yield refcount gc.collect() assert ( len(objgraph.by_type(type_)) <= refcount ), "More {0!r} objects still in memory than before." class TestVLATables(FitsTestCase): """Tests specific to tables containing variable-length arrays.""" def test_variable_length_columns(self): def test(format_code): col = fits.Column( name="QUAL_SPE", format=format_code, array=[[0] * 1571] * 225 ) tb_hdu = fits.BinTableHDU.from_columns([col]) pri_hdu = fits.PrimaryHDU() hdu_list = fits.HDUList([pri_hdu, tb_hdu]) hdu_list.writeto(self.temp("toto.fits"), overwrite=True) with fits.open(self.temp("toto.fits")) as toto: q = toto[1].data.field("QUAL_SPE") assert (q[0][4:8] == np.array([0, 0, 0, 0], dtype=np.uint8)).all() assert toto[1].columns[0].format.endswith("J(1571)") for code in ("PJ()", "QJ()"): test(code) def test_extend_variable_length_array(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54""" def test(format_code): arr = [[1] * 10] * 10 col1 = fits.Column(name="TESTVLF", format=format_code, array=arr) col2 = fits.Column(name="TESTSCA", format="J", array=[1] * 10) tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15) # This asserts that the normal 'scalar' column's length was extended assert len(tb_hdu.data["TESTSCA"]) == 15 # And this asserts that the VLF column was extended in the same manner assert len(tb_hdu.data["TESTVLF"]) == 15 # We can't compare the whole array since the _VLF is an array of # objects, but comparing just the edge case rows should suffice assert (tb_hdu.data["TESTVLF"][0] == arr[0]).all() assert (tb_hdu.data["TESTVLF"][9] == arr[9]).all() assert (tb_hdu.data["TESTVLF"][10] == ([0] * 10)).all() assert (tb_hdu.data["TESTVLF"][-1] == ([0] * 10)).all() for code in ("PJ()", "QJ()"): test(code) def test_variable_length_table_format_pd_from_object_array(self): def test(format_code): a = np.array( [np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])], "O" ) acol = fits.Column(name="testa", format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) tbhdu.writeto(self.temp("newtable.fits"), overwrite=True) with fits.open(self.temp("newtable.fits")) as tbhdu1: assert tbhdu1[1].columns[0].format.endswith("D(2)") for j in 
range(3): for i in range(len(a[j])): assert tbhdu1[1].data.field(0)[j][i] == a[j][i] for code in ("PD()", "QD()"): test(code) def test_variable_length_table_format_pd_from_list(self): def test(format_code): a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]), np.array([0.0])] acol = fits.Column(name="testa", format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) tbhdu.writeto(self.temp("newtable.fits"), overwrite=True) with fits.open(self.temp("newtable.fits")) as tbhdu1: assert tbhdu1[1].columns[0].format.endswith("D(2)") for j in range(3): for i in range(len(a[j])): assert tbhdu1[1].data.field(0)[j][i] == a[j][i] for code in ("PD()", "QD()"): test(code) def test_variable_length_table_format_pa_from_object_array(self): def test(format_code): a = np.array( [np.array(["a", "b", "c"]), np.array(["d", "e"]), np.array(["f"])], "O" ) acol = fits.Column(name="testa", format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) tbhdu.writeto(self.temp("newtable.fits"), overwrite=True) with fits.open(self.temp("newtable.fits")) as hdul: assert hdul[1].columns[0].format.endswith("A(3)") for j in range(3): for i in range(len(a[j])): assert hdul[1].data.field(0)[j][i] == a[j][i] for code in ("PA()", "QA()"): test(code) def test_variable_length_table_format_pa_from_list(self): def test(format_code): a = ["a", "ab", "abc"] acol = fits.Column(name="testa", format=format_code, array=a) tbhdu = fits.BinTableHDU.from_columns([acol]) tbhdu.writeto(self.temp("newtable.fits"), overwrite=True) with fits.open(self.temp("newtable.fits")) as hdul: assert hdul[1].columns[0].format.endswith("A(3)") for j in range(3): for i in range(len(a[j])): assert hdul[1].data.field(0)[j][i] == a[j][i] for code in ("PA()", "QA()"): test(code) def test_getdata_vla(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200""" def test(format_code): col = fits.Column( name="QUAL_SPE", format=format_code, array=[np.arange(1572)] * 225 ) tb_hdu = fits.BinTableHDU.from_columns([col]) pri_hdu = fits.PrimaryHDU() hdu_list = fits.HDUList([pri_hdu, tb_hdu]) hdu_list.writeto(self.temp("toto.fits"), overwrite=True) data = fits.getdata(self.temp("toto.fits")) # Need to compare to the original data row by row since the FITS_rec # returns an array of _VLA objects for row_a, row_b in zip(data["QUAL_SPE"], col.array): assert (row_a == row_b).all() for code in ("PJ()", "QJ()"): test(code) @pytest.mark.skipif( not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == "win32", reason="https://github.com/numpy/numpy/issues/20699", ) def test_copy_vla(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/47 """ # Make a file containing a couple of VLA tables arr1 = [np.arange(n + 1) for n in range(255)] arr2 = [np.arange(255, 256 + n) for n in range(255)] # A dummy non-VLA column needed to reproduce issue #47 c = fits.Column("test", format="J", array=np.arange(255)) c1 = fits.Column("A", format="PJ", array=arr1) c2 = fits.Column("B", format="PJ", array=arr2) t1 = fits.BinTableHDU.from_columns([c, c1]) t2 = fits.BinTableHDU.from_columns([c, c2]) hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2]) hdul.writeto(self.temp("test.fits"), overwrite=True) # Just test that the test file wrote out correctly with fits.open(self.temp("test.fits")) as h: assert h[1].header["TFORM2"] == "PJ(255)" assert h[2].header["TFORM2"] == "PJ(255)" assert comparerecords(h[1].data, t1.data) assert comparerecords(h[2].data, t2.data) # Try copying the second VLA and writing to a new file with 
fits.open(self.temp("test.fits")) as h: new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header) new_hdu.writeto(self.temp("test3.fits")) with fits.open(self.temp("test3.fits")) as h2: assert comparerecords(h2[1].data, t2.data) new_hdul = fits.HDUList([fits.PrimaryHDU()]) new_hdul.writeto(self.temp("test2.fits")) # Open several copies of the test file and append copies of the second # VLA table with fits.open(self.temp("test2.fits"), mode="append") as new_hdul: for _ in range(2): with fits.open(self.temp("test.fits")) as h: new_hdul.append(h[2]) new_hdul.flush() # Test that all the VLA copies wrote correctly with fits.open(self.temp("test2.fits")) as new_hdul: for idx in range(1, 3): assert comparerecords(new_hdul[idx].data, t2.data) def test_vla_with_gap(self): hdul = fits.open(self.data("theap-gap.fits")) data = hdul[1].data assert data.shape == (500,) assert data["i"][497] == 497 assert np.array_equal(data["arr"][497], [0, 1, 2, 3, 4]) hdul.close() def test_tolist(self): col = fits.Column( name="var", format="PI()", array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_), ) hdu = fits.BinTableHDU.from_columns([col]) assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]] assert hdu.data["var"].tolist() == [[1, 2, 3], [11, 12]] def test_tolist_from_file(self): filename = self.data("variable_length_table.fits") with fits.open(filename) as hdul: hdu = hdul[1] assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]] assert hdu.data["var"].tolist() == [[45, 56], [11, 12, 13]] @pytest.mark.skipif(sys.maxsize < 2**32, reason="requires 64-bit system") @pytest.mark.skipif(sys.platform == "win32", reason="Cannot test on Windows") @pytest.mark.hugemem def test_heapsize_P_limit(self): """ Regression test for https://github.com/astropy/astropy/issues/10812 Check if the error is raised when the heap size is bigger than what can be indexed with a 32 bit signed int. """ # a matrix with variable length array elements is created nelem = 2**28 matrix = np.zeros(1, dtype=np.object_) matrix[0] = np.arange(0.0, float(nelem + 1)) col = fits.Column(name="MATRIX", format=f"PD({nelem})", unit="", array=matrix) t = fits.BinTableHDU.from_columns([col]) t.name = "MATRIX" with pytest.raises( ValueError, match="Please consider using the 'Q' format for your file." ): t.writeto(self.temp("matrix.fits")) def test_empty_vla_raw_data(self): """ Regression test for https://github.com/astropy/astropy/issues/12881 Check if empty vla are correctly read. """ columns = [ fits.Column(name="integer", format="B", array=(1, 2)), fits.Column(name="empty", format="PJ", array=([], [])), ] fits.BinTableHDU.from_columns(columns).writeto(self.temp("bug.fits")) with fits.open(self.temp("bug.fits")) as hdu: # We can't compare the whole array since the _VLF is an array of # objects, hence we compare elementwise for i in range(len(hdu[1].data["empty"])): assert np.array_equal( hdu[1].data["empty"][i], np.array([], dtype=np.int32) ) def test_multidim_VLA_tables(self): """ Check if multidimensional VLF are correctly write and read. 
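        Here a variable-length descriptor format such as ``PD(7)`` is combined
        with a ``dim``/TDIMn specification, and the expectation is that each
        row is read back with the multidimensional shape implied by TDIMn
        rather than as a flat vector.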
See https://github.com/astropy/astropy/issues/12860 and https://github.com/astropy/astropy/issues/7810 """ a = np.arange(5) b = np.arange(7) array = np.array([a, b], dtype=object) col = fits.Column(name="test", format="PD(7)", dim="(7,1)", array=array) fits.BinTableHDU.from_columns([col]).writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdus: print(hdus[1].data["test"][0]) assert hdus[1].columns.formats == ["PD(7)"] assert np.array_equal( hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0, 3.0, 4.0]]) ) assert np.array_equal( hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]]) ) a = np.arange(10).reshape((5, 2)) b = np.arange(14).reshape((7, 2)) array = np.array([a, b], dtype=object) col = fits.Column(name="test", format="PD(14)", dim="(2,7)", array=array) fits.BinTableHDU.from_columns([col]).writeto(self.temp("test2.fits")) with fits.open(self.temp("test2.fits")) as hdus: assert hdus[1].columns.formats == ["PD(14)"] assert np.array_equal( hdus[1].data["test"][0], np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0]]), ) assert np.array_equal( hdus[1].data["test"][1], np.array( [ [0.0, 1.0], [2.0, 3.0], [4.0, 5.0], [6.0, 7.0], [8.0, 9.0], [10.0, 11.0], [12.0, 13.0], ] ), ) a = np.arange(3).reshape((1, 3)) b = np.arange(6).reshape((2, 3)) array = np.array([a, b], dtype=object) col = fits.Column(name="test", format="PD(6)", dim="(3,2)", array=array) fits.BinTableHDU.from_columns([col]).writeto(self.temp("test3.fits")) with fits.open(self.temp("test3.fits")) as hdus: assert hdus[1].columns.formats == ["PD(6)"] assert np.array_equal(hdus[1].data["test"][0], np.array([[0.0, 1.0, 2.0]])) assert np.array_equal( hdus[1].data["test"][1], np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]) ) # These are tests that solely test the Column and ColDefs interfaces and # related functionality without directly involving full tables; currently there # are few of these but I expect there to be more as I improve the test coverage class TestColumnFunctions(FitsTestCase): def test_column_format_interpretation(self): """ Test to ensure that when Numpy-style record formats are passed in to the Column constructor for the format argument, they are recognized so long as it's unambiguous (where "unambiguous" here is questionable since Numpy is case insensitive when parsing the format codes. But their "proper" case is lower-case, so we can accept that. Basically, actually, any key in the NUMPY2FITS dict should be accepted. 
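        For example, ``fits.Column('TEST', 'i4')`` and
        ``fits.Column('TEST', 'J')`` should both resolve to the FITS binary
        table format 'J'.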
""" for recformat, fitsformat in NUMPY2FITS.items(): c = fits.Column("TEST", np.dtype(recformat)) c.format == fitsformat c = fits.Column("TEST", recformat) c.format == fitsformat c = fits.Column("TEST", fitsformat) c.format == fitsformat # Test a few cases that are ambiguous in that they *are* valid binary # table formats though not ones that are likely to be used, but are # also valid common ASCII table formats c = fits.Column("TEST", "I4") assert c.format == "I4" assert c.format.format == "I" assert c.format.width == 4 c = fits.Column("TEST", "F15.8") assert c.format == "F15.8" assert c.format.format == "F" assert c.format.width == 15 assert c.format.precision == 8 c = fits.Column("TEST", "E15.8") assert c.format.format == "E" assert c.format.width == 15 assert c.format.precision == 8 c = fits.Column("TEST", "D15.8") assert c.format.format == "D" assert c.format.width == 15 assert c.format.precision == 8 # zero-precision should be allowed as well, for float types # https://github.com/astropy/astropy/issues/3422 c = fits.Column("TEST", "F10.0") assert c.format.format == "F" assert c.format.width == 10 assert c.format.precision == 0 c = fits.Column("TEST", "E10.0") assert c.format.format == "E" assert c.format.width == 10 assert c.format.precision == 0 c = fits.Column("TEST", "D10.0") assert c.format.format == "D" assert c.format.width == 10 assert c.format.precision == 0 # These are a couple cases where the format code is a valid binary # table format, and is not strictly a valid ASCII table format but # could be *interpreted* as one by appending a default width. This # will only happen either when creating an ASCII table or when # explicitly specifying ascii=True when the column is created c = fits.Column("TEST", "I") assert c.format == "I" assert c.format.recformat == "i2" c = fits.Column("TEST", "I", ascii=True) assert c.format == "I10" assert c.format.recformat == "i4" # With specified widths, integer precision should be set appropriately c = fits.Column("TEST", "I4", ascii=True) assert c.format == "I4" assert c.format.recformat == "i2" c = fits.Column("TEST", "I9", ascii=True) assert c.format == "I9" assert c.format.recformat == "i4" c = fits.Column("TEST", "I12", ascii=True) assert c.format == "I12" assert c.format.recformat == "i8" c = fits.Column("TEST", "E") assert c.format == "E" assert c.format.recformat == "f4" c = fits.Column("TEST", "E", ascii=True) assert c.format == "E15.7" # F is not a valid binary table format so it should be unambiguously # treated as an ASCII column c = fits.Column("TEST", "F") assert c.format == "F16.7" c = fits.Column("TEST", "D") assert c.format == "D" assert c.format.recformat == "f8" c = fits.Column("TEST", "D", ascii=True) assert c.format == "D25.17" def test_zero_precision_float_column(self): """ Regression test for https://github.com/astropy/astropy/issues/3422 """ c = fits.Column("TEST", "F5.0", array=[1.1, 2.2, 3.3]) # The decimal places will be clipped t = fits.TableHDU.from_columns([c]) t.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul: assert hdul[1].header["TFORM1"] == "F5.0" assert hdul[1].data["TEST"].dtype == np.dtype("float64") assert np.all(hdul[1].data["TEST"] == [1.0, 2.0, 3.0]) # Check how the raw data looks raw = np.rec.recarray.field(hdul[1].data, "TEST") assert raw.tobytes() == b" 1. 2. 3." 
def test_column_array_type_mismatch(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218""" arr = [-99] * 20 col = fits.Column("mag", format="E", array=arr) assert (arr == col.array).all() def test_new_coldefs_with_invalid_seqence(self): """Test that a TypeError is raised when a ColDefs is instantiated with a sequence of non-Column objects. """ pytest.raises(TypeError, fits.ColDefs, [1, 2, 3]) def test_coldefs_init_from_array(self): """Test that ColDefs._init_from_array works with single element data- types as well as multi-element data-types """ nd_array = np.ndarray((1,), dtype=[("A", "<u4", (2,)), ("B", ">u2")]) col_defs = fits.column.ColDefs(nd_array) assert 2**31 == col_defs["A"].bzero assert 2**15 == col_defs["B"].bzero def test_pickle(self): """ Regression test for https://github.com/astropy/astropy/issues/1597 Tests for pickling FITS_rec objects """ # open existing FITS tables (images pickle by default, no test needed): with fits.open(self.data("tb.fits")) as btb: # Test column array is delayed and can pickle assert isinstance(btb[1].columns._arrays[0], Delayed) btb_pd = pickle.dumps(btb[1].data) btb_pl = pickle.loads(btb_pd) # It should not be delayed any more assert not isinstance(btb[1].columns._arrays[0], Delayed) assert comparerecords(btb_pl, btb[1].data) with fits.open(self.data("ascii.fits")) as asc: asc_pd = pickle.dumps(asc[1].data) asc_pl = pickle.loads(asc_pd) assert comparerecords(asc_pl, asc[1].data) with fits.open(self.data("random_groups.fits")) as rgr: rgr_pd = pickle.dumps(rgr[0].data) rgr_pl = pickle.loads(rgr_pd) assert comparerecords(rgr_pl, rgr[0].data) with fits.open(self.data("zerowidth.fits")) as zwc: # Doesn't pickle zero-width (_phanotm) column 'ORBPARM' zwc_pd = pickle.dumps(zwc[2].data) zwc_pl = pickle.loads(zwc_pd) with pytest.warns( UserWarning, match=r"Field 2 has a repeat count " r"of 0 in its format code", ): assert comparerecords(zwc_pl, zwc[2].data) def test_column_lookup_by_name(self): """Tests that a `ColDefs` can be indexed by column name.""" a = fits.Column(name="a", format="D") b = fits.Column(name="b", format="D") cols = fits.ColDefs([a, b]) assert cols["a"] == cols[0] assert cols["b"] == cols[1] def test_column_attribute_change_after_removal(self): """ This is a test of the column attribute change notification system. After a column has been removed from a table (but other references are kept to that same column) changes to that column's attributes should not trigger a notification on the table it was removed from. 
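        Concretely, renaming a column object after it has been removed from
        the table must not reintroduce a TTYPEn card into that table's header.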
""" # One way we can check this is to ensure there are no further changes # to the header table = fits.BinTableHDU.from_columns( [fits.Column("a", format="D"), fits.Column("b", format="D")] ) b = table.columns["b"] table.columns.del_col("b") assert table.data.dtype.names == ("a",) b.name = "HELLO" assert b.name == "HELLO" assert "TTYPE2" not in table.header assert table.header["TTYPE1"] == "a" assert table.columns.names == ["a"] with pytest.raises(KeyError): table.columns["b"] # Make sure updates to the remaining column still work table.columns.change_name("a", "GOODBYE") with pytest.raises(KeyError): table.columns["a"] assert table.columns["GOODBYE"].name == "GOODBYE" assert table.data.dtype.names == ("GOODBYE",) assert table.columns.names == ["GOODBYE"] assert table.data.columns.names == ["GOODBYE"] table.columns["GOODBYE"].name = "foo" with pytest.raises(KeyError): table.columns["GOODBYE"] assert table.columns["foo"].name == "foo" assert table.data.dtype.names == ("foo",) assert table.columns.names == ["foo"] assert table.data.columns.names == ["foo"] def test_x_column_deepcopy(self): """ Regression test for https://github.com/astropy/astropy/pull/4514 Tests that columns with the X (bit array) format can be deep-copied. """ c = fits.Column("xcol", format="5X", array=[1, 0, 0, 1, 0]) c2 = copy.deepcopy(c) assert c2.name == c.name assert c2.format == c.format assert np.all(c2.array == c.array) def test_p_column_deepcopy(self): """ Regression test for https://github.com/astropy/astropy/pull/4514 Tests that columns with the P/Q formats (variable length arrays) can be deep-copied. """ c = fits.Column("pcol", format="PJ", array=[[1, 2], [3, 4, 5]]) c2 = copy.deepcopy(c) assert c2.name == c.name assert c2.format == c.format assert np.all(c2.array[0] == c.array[0]) assert np.all(c2.array[1] == c.array[1]) c3 = fits.Column("qcol", format="QJ", array=[[1, 2], [3, 4, 5]]) c4 = copy.deepcopy(c3) assert c4.name == c3.name assert c4.format == c3.format assert np.all(c4.array[0] == c3.array[0]) assert np.all(c4.array[1] == c3.array[1]) def test_column_verify_keywords(self): """ Test that the keyword arguments used to initialize a Column, specifically those that typically read from a FITS header (so excluding array), are verified to have a valid value. """ with pytest.raises(AssertionError) as err: _ = fits.Column(1, format="I", array=[1, 2, 3, 4, 5]) assert "Column name must be a string able to fit" in str(err.value) with pytest.raises(VerifyError) as err: _ = fits.Column( "col", format=0, null="Nan", disp=1, coord_type=1, coord_unit=2, coord_inc="1", time_ref_pos=1, coord_ref_point="1", coord_ref_value="1", ) err_msgs = [ "keyword arguments to Column were invalid", "TFORM", "TNULL", "TDISP", "TCTYP", "TCUNI", "TCRPX", "TCRVL", "TCDLT", "TRPOS", ] for msg in err_msgs: assert msg in str(err.value) def test_column_verify_start(self): """ Regression test for https://github.com/astropy/astropy/pull/6359 Test the validation of the column start position option (ASCII table only), corresponding to ``TBCOL`` keyword. Test whether the VerifyError message generated is the one with highest priority, i.e. the order of error messages to be displayed is maintained. 
""" with pytest.raises(VerifyError) as err: _ = fits.Column("a", format="B", start="a", array=[1, 2, 3]) assert "start option (TBCOLn) is not allowed for binary table columns" in str( err.value ) with pytest.raises(VerifyError) as err: _ = fits.Column("a", format="I", start="a", array=[1, 2, 3]) assert "start option (TBCOLn) must be a positive integer (got 'a')." in str( err.value ) with pytest.raises(VerifyError) as err: _ = fits.Column("a", format="I", start="-56", array=[1, 2, 3]) assert "start option (TBCOLn) must be a positive integer (got -56)." in str( err.value ) @pytest.mark.parametrize( "keys", [ {"TFORM": "Z", "TDISP": "E"}, {"TFORM": "2", "TDISP": "2E"}, {"TFORM": 3, "TDISP": 6.3}, {"TFORM": float, "TDISP": np.float64}, {"TFORM": "", "TDISP": "E.5"}, ], ) def test_column_verify_formats(self, keys): """ Additional tests for verification of 'TFORM' and 'TDISP' keyword arguments used to initialize a Column. """ with pytest.raises(VerifyError) as err: _ = fits.Column("col", format=keys["TFORM"], disp=keys["TDISP"]) for key in keys.keys(): assert key in str(err.value) assert str(keys[key]) in str(err.value) def test_regression_5383(): # Regression test for an undefined variable x = np.array([1, 2, 3]) col = fits.Column(name="a", array=x, format="E") hdu = fits.BinTableHDU.from_columns([col]) del hdu._header["TTYPE1"] hdu.columns[0].name = "b" def test_table_to_hdu(): from astropy.table import Table table = Table( [[1, 2, 3], ["a", "b", "c"], [2.3, 4.5, 6.7]], names=["a", "b", "c"], dtype=["i", "U1", "f"], ) table["a"].unit = "m/s" table["b"].unit = "not-a-unit" table.meta["foo"] = "bar" with pytest.warns( UnitsWarning, match="'not-a-unit' did not parse as fits unit" ) as w: hdu = fits.BinTableHDU(table, header=fits.Header({"TEST": 1})) assert len(w) == 1 for name in "abc": assert np.array_equal(table[name], hdu.data[name]) # Check that TUNITn cards appear in the correct order # (https://github.com/astropy/astropy/pull/5720) assert hdu.header.index("TUNIT1") < hdu.header.index("TTYPE2") assert hdu.header["FOO"] == "bar" assert hdu.header["TEST"] == 1 def test_regression_scalar_indexing(): # Indexing a FITS_rec with a tuple that returns a scalar record # should work x = np.array([(1.0, 2), (3.0, 4)], dtype=[("x", float), ("y", int)]).view( fits.FITS_rec ) x1a = x[1] # this should succeed. x1b = x[(1,)] # FITS_record does not define __eq__; so test elements. assert all(a == b for a, b in zip(x1a, x1b)) def test_new_column_attributes_preserved(tmp_path): # Regression test for https://github.com/astropy/astropy/issues/7145 # This makes sure that for now we don't clear away keywords that have # newly been recognized (in Astropy 3.0) as special column attributes but # instead just warn that we might do so in future. 
The new keywords are: # TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS col = [] col.append(fits.Column(name="TIME", format="1E", unit="s")) col.append(fits.Column(name="RAWX", format="1I", unit="pixel")) col.append(fits.Column(name="RAWY", format="1I")) cd = fits.ColDefs(col) hdr = fits.Header() # Keywords that will get ignored in favor of these in the data hdr["TUNIT1"] = "pixel" hdr["TUNIT2"] = "m" hdr["TUNIT3"] = "m" # Keywords that were added in Astropy 3.0 that should eventually be # ignored and set on the data instead hdr["TCTYP2"] = "RA---TAN" hdr["TCTYP3"] = "ANGLE" hdr["TCRVL2"] = -999.0 hdr["TCRVL3"] = -999.0 hdr["TCRPX2"] = 1.0 hdr["TCRPX3"] = 1.0 hdr["TALEN2"] = 16384 hdr["TALEN3"] = 1024 hdr["TCUNI2"] = "angstrom" hdr["TCUNI3"] = "deg" # Other non-relevant keywords hdr["RA"] = 1.5 hdr["DEC"] = 3.0 with pytest.warns(AstropyDeprecationWarning) as warning_list: hdu = fits.BinTableHDU.from_columns(cd, hdr) assert str(warning_list[0].message).startswith( "The following keywords are now recognized as special" ) # First, check that special keywords such as TUNIT are ignored in the header # We may want to change that behavior in future, but this is the way it's # been for a while now. assert hdu.columns[0].unit == "s" assert hdu.columns[1].unit == "pixel" assert hdu.columns[2].unit is None assert hdu.header["TUNIT1"] == "s" assert hdu.header["TUNIT2"] == "pixel" assert "TUNIT3" not in hdu.header # TUNIT3 was removed # Now, check that the new special keywords are actually still there # but weren't used to set the attributes on the data assert hdu.columns[0].coord_type is None assert hdu.columns[1].coord_type is None assert hdu.columns[2].coord_type is None assert "TCTYP1" not in hdu.header assert hdu.header["TCTYP2"] == "RA---TAN" assert hdu.header["TCTYP3"] == "ANGLE" # Make sure that other keywords are still there assert hdu.header["RA"] == 1.5 assert hdu.header["DEC"] == 3.0 # Now we can write this HDU to a file and re-load. 
Re-loading *should* # cause the special column attribtues to be picked up (it's just that when a # header is manually specified, these values are ignored) filename = tmp_path / "test.fits" hdu.writeto(filename) # Make sure we don't emit a warning in this case with warnings.catch_warnings(record=True) as warning_list: with fits.open(filename) as hdul: hdu2 = hdul[1] assert len(warning_list) == 0 # Check that column attributes are now correctly set assert hdu2.columns[0].unit == "s" assert hdu2.columns[1].unit == "pixel" assert hdu2.columns[2].unit is None assert hdu2.header["TUNIT1"] == "s" assert hdu2.header["TUNIT2"] == "pixel" assert "TUNIT3" not in hdu2.header # TUNIT3 was removed # Now, check that the new special keywords are actually still there # but weren't used to set the attributes on the data assert hdu2.columns[0].coord_type is None assert hdu2.columns[1].coord_type == "RA---TAN" assert hdu2.columns[2].coord_type == "ANGLE" assert "TCTYP1" not in hdu2.header assert hdu2.header["TCTYP2"] == "RA---TAN" assert hdu2.header["TCTYP3"] == "ANGLE" # Make sure that other keywords are still there assert hdu2.header["RA"] == 1.5 assert hdu2.header["DEC"] == 3.0 def test_empty_table(tmp_path): ofile = tmp_path / "emptytable.fits" hdu = fits.BinTableHDU(header=None, data=None, name="TEST") hdu.writeto(ofile) with fits.open(ofile) as hdul: assert hdul["TEST"].data.size == 0 ofile = tmp_path / "emptytable.fits.gz" hdu = fits.BinTableHDU(header=None, data=None, name="TEST") hdu.writeto(ofile, overwrite=True) with fits.open(ofile) as hdul: assert hdul["TEST"].data.size == 0 def test_a3dtable(tmp_path): testfile = tmp_path / "test.fits" hdu = fits.BinTableHDU.from_columns( [fits.Column(name="FOO", format="J", array=np.arange(10))] ) hdu.header["XTENSION"] = "A3DTABLE" hdu.writeto(testfile, output_verify="ignore") with fits.open(testfile) as hdul: assert hdul[1].header["XTENSION"] == "A3DTABLE" with pytest.warns(AstropyUserWarning) as w: hdul.verify("fix") assert str(w[0].message) == "Verification reported errors:" assert str(w[2].message).endswith("Converted the XTENSION keyword to BINTABLE.") assert hdul[1].header["XTENSION"] == "BINTABLE" def test_invalid_file(tmp_path): hdu = fits.BinTableHDU() # little trick to write an invalid card ... hdu.header["FOO"] = None hdu.header.cards["FOO"]._value = np.nan testfile = tmp_path / "test.fits" hdu.writeto(testfile, output_verify="ignore") with fits.open(testfile) as hdul: assert hdul[1].data is not None def test_unit_parse_strict(tmp_path): path = tmp_path / "invalid_unit.fits" # this is a unit parseable by the generic format but invalid for FITS invalid_unit = "1 / (MeV sr s)" unit = Unit(invalid_unit) t = Table({"a": [1, 2, 3]}) t.write(path) with fits.open(path, mode="update") as hdul: hdul[1].header["TUNIT1"] = invalid_unit # default is "warn" with pytest.warns(UnitsWarning): t = Table.read(path) assert isinstance(t["a"].unit, UnrecognizedUnit) t = Table.read(path, unit_parse_strict="silent") assert isinstance(t["a"].unit, UnrecognizedUnit) with pytest.raises(ValueError): Table.read(path, unit_parse_strict="raise") with pytest.warns(UnitsWarning): Table.read(path, unit_parse_strict="warn")
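# (A minimal sketch of the behaviour exercised by test_unit_parse_strict
# above, assuming a file whose TUNIT1 holds a unit that FITS cannot parse:
#     Table.read(path)                             -> warns, UnrecognizedUnit
#     Table.read(path, unit_parse_strict="silent") -> no warning, UnrecognizedUnit
#     Table.read(path, unit_parse_strict="raise")  -> raises ValueError
# )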
# Licensed under a 3-clause BSD style license - see PYFITS.rst import copy import io import os import subprocess import sys import numpy as np import pytest from astropy.io import fits from astropy.io.fits.hdu.base import _NonstandardHDU, _ValidHDU from astropy.io.fits.verify import VerifyError, VerifyWarning from astropy.utils.data import get_pkg_data_filenames from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH from .conftest import FitsTestCase class TestHDUListFunctions(FitsTestCase): def test_update_name(self): with fits.open(self.data("o4sp040b0_raw.fits")) as hdul: hdul[4].name = "Jim" hdul[4].ver = 9 assert hdul[("JIM", 9)].header["extname"] == "JIM" def test_hdu_file_bytes(self): with fits.open(self.data("checksum.fits")) as hdul: res = hdul[0].filebytes() assert res == 11520 res = hdul[1].filebytes() assert res == 8640 def test_hdulist_file_info(self): def test_fileinfo(**kwargs): assert res["datSpan"] == kwargs.get("datSpan", 2880) assert res["resized"] == kwargs.get("resized", False) assert res["filename"] == self.data("checksum.fits") assert res["datLoc"] == kwargs.get("datLoc", 8640) assert res["hdrLoc"] == kwargs.get("hdrLoc", 0) assert res["filemode"] == "readonly" with fits.open(self.data("checksum.fits")) as hdul: res = hdul.fileinfo(0) res = hdul.fileinfo(1) test_fileinfo(datLoc=17280, hdrLoc=11520) hdu = fits.ImageHDU(data=hdul[0].data) hdul.insert(1, hdu) res = hdul.fileinfo(0) test_fileinfo(resized=True) res = hdul.fileinfo(1) test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None) res = hdul.fileinfo(2) test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520) def test_create_from_multiple_primary(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145 Ensure that a validation error occurs when saving an HDUList containing multiple PrimaryHDUs. """ hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()]) pytest.raises( VerifyError, hdul.writeto, self.temp("temp.fits"), output_verify="exception" ) def test_append_primary_to_empty_list(self): # Tests appending a Simple PrimaryHDU to an empty HDUList. 
hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_extension_to_empty_list(self): """Tests appending a Simple ImageHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [(0, "PRIMARY", 1, "PrimaryHDU", 4, (100,), "int32", "")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_table_extension_to_empty_list(self): """Tests appending a Simple Table ExtensionHDU to a empty HDUList.""" hdul = fits.HDUList() with fits.open(self.data("tb.fits")) as hdul1: hdul.append(hdul1[1]) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""), (1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_groupshdu_to_empty_list(self): """Tests appending a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.append(hdu) info = [(0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_primary_to_non_empty_list(self): """Tests appending a Simple PrimaryHDU to a non-empty HDUList.""" with fits.open(self.data("arange.fits")) as hdul: hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 7, (11, 10, 7), "int32", ""), (1, "", 1, "ImageHDU", 6, (100,), "int32", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_extension_to_non_empty_list(self): """Tests appending a Simple ExtensionHDU to a non-empty HDUList.""" with fits.open(self.data("tb.fits")) as hdul: hdul.append(hdul[1]) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 11, (), "", ""), (1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), (2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-append.fits")) assert fits.info(self.temp("test-append.fits"), output=False) == info def test_append_groupshdu_to_non_empty_list(self): """Tests appending a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.append(hdu) hdu = fits.GroupsHDU() with pytest.raises(ValueError): hdul.append(hdu) def test_insert_primary_to_empty_list(self): """Tests inserting a Simple PrimaryHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_extension_to_empty_list(self): """Tests inserting a Simple ImageHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) 
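        # Inserting an ImageHDU at index 0 of an empty HDUList should promote
        # it to a PrimaryHDU, which is what the expected info tuple below
        # reflects.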
hdul.insert(0, hdu) info = [(0, "PRIMARY", 1, "PrimaryHDU", 4, (100,), "int32", "")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_table_extension_to_empty_list(self): """Tests inserting a Simple Table ExtensionHDU to a empty HDUList.""" hdul = fits.HDUList() with fits.open(self.data("tb.fits")) as hdul1: hdul.insert(0, hdul1[1]) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""), (1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_groupshdu_to_empty_list(self): """Tests inserting a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.insert(0, hdu) info = [(0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters")] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_primary_to_non_empty_list(self): """Tests inserting a Simple PrimaryHDU to a non-empty HDUList.""" with fits.open(self.data("arange.fits")) as hdul: hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(1, hdu) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 7, (11, 10, 7), "int32", ""), (1, "", 1, "ImageHDU", 6, (100,), "int32", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_extension_to_non_empty_list(self): """Tests inserting a Simple ExtensionHDU to a non-empty HDUList.""" with fits.open(self.data("tb.fits")) as hdul: hdul.insert(1, hdul[1]) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 11, (), "", ""), (1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), (2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_groupshdu_to_non_empty_list(self): """Tests inserting a Simple GroupsHDU to an empty HDUList.""" hdul = fits.HDUList() hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) hdu = fits.GroupsHDU() with pytest.raises(ValueError): hdul.insert(1, hdu) info = [ (0, "PRIMARY", 1, "GroupsHDU", 8, (), "", "1 Groups 0 Parameters"), (1, "", 1, "ImageHDU", 6, (100,), "int32", ""), ] hdul.insert(0, hdu) assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self): """ Tests inserting a Simple GroupsHDU to the beginning of an HDUList that that already contains a GroupsHDU. """ hdul = fits.HDUList() hdu = fits.GroupsHDU() hdul.insert(0, hdu) with pytest.raises(ValueError): hdul.insert(0, hdu) def test_insert_extension_to_primary_in_non_empty_list(self): # Tests inserting a Simple ExtensionHDU to a non-empty HDUList. 
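        # (A table extension cannot be the primary HDU, so inserting it at
        # index 0 is expected to prepend a minimal PrimaryHDU and demote the
        # original primary to an ImageHDU, as the expected info list below
        # shows.)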
with fits.open(self.data("tb.fits")) as hdul: hdul.insert(0, hdul[1]) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 4, (), "", ""), (1, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), (2, "", 1, "ImageHDU", 12, (), "", ""), (3, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_insert_image_extension_to_primary_in_non_empty_list(self): """ Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList as the primary HDU. """ with fits.open(self.data("tb.fits")) as hdul: hdu = fits.ImageHDU(np.arange(100, dtype=np.int32)) hdul.insert(0, hdu) info = [ (0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", ""), (1, "", 1, "ImageHDU", 12, (), "", ""), (2, "", 1, "BinTableHDU", 24, "2R x 4C", "[1J, 3A, 1E, 1L]", ""), ] assert hdul.info(output=False) == info hdul.writeto(self.temp("test-insert.fits")) assert fits.info(self.temp("test-insert.fits"), output=False) == info def test_filename(self, home_is_data): """Tests the HDUList filename method.""" with fits.open(self.data("tb.fits")) as hdul: name = hdul.filename() assert name == os.path.expanduser(self.data("tb.fits")) def test_file_like(self): """ Tests the use of a file like object with no tell or seek methods in HDUList.writeto(), HDULIST.flush() or astropy.io.fits.writeto() """ hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) hdul = fits.HDUList() hdul.append(hdu) tmpfile = open(self.temp("tmpfile.fits"), "wb") hdul.writeto(tmpfile) tmpfile.close() info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")] assert fits.info(self.temp("tmpfile.fits"), output=False) == info def test_file_like_2(self): hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32)) tmpfile = open(self.temp("tmpfile.fits"), "wb") hdul = fits.open(tmpfile, mode="ostream") hdul.append(hdu) hdul.flush() tmpfile.close() hdul.close() info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")] assert fits.info(self.temp("tmpfile.fits"), output=False) == info def test_file_like_3(self): tmpfile = open(self.temp("tmpfile.fits"), "wb") fits.writeto(tmpfile, np.arange(100, dtype=np.int32)) tmpfile.close() info = [(0, "PRIMARY", 1, "PrimaryHDU", 5, (100,), "int32", "")] assert fits.info(self.temp("tmpfile.fits"), output=False) == info def test_shallow_copy(self): """ Tests that `HDUList.__copy__()` and `HDUList.copy()` return a shallow copy (regression test for #7211). """ n = np.arange(10.0) primary_hdu = fits.PrimaryHDU(n) hdu = fits.ImageHDU(n) hdul = fits.HDUList([primary_hdu, hdu]) for hdulcopy in (hdul.copy(), copy.copy(hdul)): assert isinstance(hdulcopy, fits.HDUList) assert hdulcopy is not hdul assert hdulcopy[0] is hdul[0] assert hdulcopy[1] is hdul[1] def test_deep_copy(self): """ Tests that `HDUList.__deepcopy__()` returns a deep copy. """ n = np.arange(10.0) primary_hdu = fits.PrimaryHDU(n) hdu = fits.ImageHDU(n) hdul = fits.HDUList([primary_hdu, hdu]) hdulcopy = copy.deepcopy(hdul) assert isinstance(hdulcopy, fits.HDUList) assert hdulcopy is not hdul for index in range(len(hdul)): assert hdulcopy[index] is not hdul[index] assert hdulcopy[index].header == hdul[index].header np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data) def test_new_hdu_extname(self): """ Tests that new extension HDUs that are added to an HDUList can be properly indexed by their EXTNAME/EXTVER (regression test for ticket:48). 
""" with fits.open(self.data("test0.fits")) as f: hdul = fits.HDUList() hdul.append(f[0].copy()) hdu = fits.ImageHDU(header=f[1].header) hdul.append(hdu) assert hdul[1].header["EXTNAME"] == "SCI" assert hdul[1].header["EXTVER"] == 1 assert hdul.index_of(("SCI", 1)) == 1 assert hdul.index_of(hdu) == len(hdul) - 1 def test_update_filelike(self): """Test opening a file-like object in update mode and resizing the HDU. """ sf = io.BytesIO() arr = np.zeros((100, 100)) hdu = fits.PrimaryHDU(data=arr) hdu.writeto(sf) sf.seek(0) arr = np.zeros((200, 200)) hdul = fits.open(sf, mode="update") hdul[0].data = arr hdul.flush() sf.seek(0) hdul = fits.open(sf) assert len(hdul) == 1 assert (hdul[0].data == arr).all() def test_flush_readonly(self): """Test flushing changes to a file opened in a read only mode.""" oldmtime = os.stat(self.data("test0.fits")).st_mtime with fits.open(self.data("test0.fits")) as hdul: hdul[0].header["FOO"] = "BAR" with pytest.warns(AstropyUserWarning, match="mode is not supported") as w: hdul.flush() assert len(w) == 1 assert oldmtime == os.stat(self.data("test0.fits")).st_mtime def test_fix_extend_keyword(self): hdul = fits.HDUList() hdul.append(fits.PrimaryHDU()) hdul.append(fits.ImageHDU()) del hdul[0].header["EXTEND"] hdul.verify("silentfix") assert "EXTEND" in hdul[0].header assert hdul[0].header["EXTEND"] is True def test_fix_malformed_naxisj(self): """ Tests that malformed NAXISj values are fixed sensibly. """ hdu = fits.open(self.data("arange.fits")) # Malform NAXISj header data hdu[0].header["NAXIS1"] = 11.0 hdu[0].header["NAXIS2"] = "10.0" hdu[0].header["NAXIS3"] = "7" # Axes cache needs to be malformed as well hdu[0]._axes = [11.0, "10.0", "7"] # Perform verification including the fix hdu.verify("silentfix") # Check that malformed data was converted assert hdu[0].header["NAXIS1"] == 11 assert hdu[0].header["NAXIS2"] == 10 assert hdu[0].header["NAXIS3"] == 7 hdu.close() def test_fix_wellformed_naxisj(self): """ Tests that wellformed NAXISj values are not modified. """ hdu = fits.open(self.data("arange.fits")) # Fake new NAXISj header data hdu[0].header["NAXIS1"] = 768 hdu[0].header["NAXIS2"] = 64 hdu[0].header["NAXIS3"] = 8 # Axes cache needs to be faked as well hdu[0]._axes = [768, 64, 8] # Perform verification including the fix hdu.verify("silentfix") # Check that malformed data was converted assert hdu[0].header["NAXIS1"] == 768 assert hdu[0].header["NAXIS2"] == 64 assert hdu[0].header["NAXIS3"] == 8 hdu.close() def test_new_hdulist_extend_keyword(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114 Tests that adding a PrimaryHDU to a new HDUList object updates the EXTEND keyword on that HDU. """ h0 = fits.Header() hdu = fits.PrimaryHDU(header=h0) sci = fits.ImageHDU(data=np.array(10)) image = fits.HDUList([hdu, sci]) image.writeto(self.temp("temp.fits")) assert "EXTEND" in hdu.header assert hdu.header["EXTEND"] is True def test_replace_memmaped_array(self, home_is_temp): # Copy the original before we modify it with fits.open(self.data("test0.fits")) as hdul: hdul.writeto(self.temp("temp.fits")) hdul = fits.open(self.temp("temp.fits"), mode="update", memmap=True) old_data = hdul[1].data.copy() hdul[1].data = hdul[1].data + 1 hdul.close() with fits.open(self.temp("temp.fits"), memmap=True) as hdul: assert ((old_data + 1) == hdul[1].data).all() def test_open_file_with_bad_file_padding(self): """ Test warning when opening files with extra padding at the end. 
        See https://github.com/astropy/astropy/issues/4351
        """
        # write some arbitrary data to a FITS file
        fits.writeto(self.temp("temp.fits"), np.arange(100))
        # append some arbitrary number of zeros to the end
        with open(self.temp("temp.fits"), "ab") as fobj:
            fobj.write(b"\x00" * 1234)
        with pytest.warns(
            AstropyUserWarning, match="Unexpected extra padding at the end of the file."
        ) as w:
            with fits.open(self.temp("temp.fits")) as fobj:
                fobj.info()
        assert len(w) == 1

    @pytest.mark.filterwarnings("ignore:Unexpected extra padding")
    def test_open_file_with_end_padding(self):
        """Regression test for
        https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106

        Open files with end padding bytes.
        """
        with fits.open(self.data("test0.fits"), do_not_scale_image_data=True) as hdul:
            info = hdul.info(output=False)
            hdul.writeto(self.temp("temp.fits"))
        with open(self.temp("temp.fits"), "ab") as f:
            f.seek(0, os.SEEK_END)
            f.write(b"\0" * 2880)
        assert info == fits.info(
            self.temp("temp.fits"), output=False, do_not_scale_image_data=True
        )

    def test_open_file_with_bad_header_padding(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136

        Open files with nulls for header block padding instead of spaces.
        """
        a = np.arange(100).reshape(10, 10)
        hdu = fits.PrimaryHDU(data=a)
        hdu.writeto(self.temp("temp.fits"))

        # Figure out where the header padding begins and fill it with nulls
        end_card_pos = str(hdu.header).index("END" + " " * 77)
        padding_start = end_card_pos + 80
        padding_len = 2880 - padding_start
        with open(self.temp("temp.fits"), "r+b") as f:
            f.seek(padding_start)
            f.write(b"\0" * padding_len)

        with pytest.warns(
            AstropyUserWarning, match="contains null bytes instead of spaces"
        ) as w:
            with fits.open(self.temp("temp.fits")) as hdul:
                assert (hdul[0].data == a).all()
        assert len(w) == 1
        assert len(hdul) == 1
        assert str(hdul[0].header) == str(hdu.header)

    def test_update_with_truncated_header(self):
        """
        Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148

        Test that saving an update where the header is shorter than the
        original header doesn't leave a stump from the old header in the file.
        """
        data = np.arange(100)
        hdu = fits.PrimaryHDU(data=data)
        idx = 1
        while len(hdu.header) < 34:
            hdu.header[f"TEST{idx}"] = idx
            idx += 1
        hdu.writeto(self.temp("temp.fits"), checksum=True)
        with fits.open(self.temp("temp.fits"), mode="update") as hdul:
            # Modify the header, forcing it to be rewritten
            hdul[0].header["TEST1"] = 2
        with fits.open(self.temp("temp.fits")) as hdul:
            assert (hdul[0].data == data).all()

    def test_update_resized_header(self, home_is_temp):
        """
        Test saving updates to a file where the header is one block smaller
        than before, and in the case where the header is one block larger
        than before.
""" data = np.arange(100) hdu = fits.PrimaryHDU(data=data) idx = 1 while len(str(hdu.header)) <= 2880: hdu.header[f"TEST{idx}"] = idx idx += 1 orig_header = hdu.header.copy() hdu.writeto(self.temp("temp.fits")) with fits.open(self.temp("temp.fits"), mode="update") as hdul: while len(str(hdul[0].header)) > 2880: del hdul[0].header[-1] with fits.open(self.temp("temp.fits")) as hdul: assert hdul[0].header == orig_header[:-1] assert (hdul[0].data == data).all() with fits.open(self.temp("temp.fits"), mode="update") as hdul: idx = 101 while len(str(hdul[0].header)) <= 2880 * 2: hdul[0].header[f"TEST{idx}"] = idx idx += 1 # Touch something in the data too so that it has to be rewritten hdul[0].data[0] = 27 with fits.open(self.temp("temp.fits")) as hdul: assert hdul[0].header[:-37] == orig_header[:-1] assert hdul[0].data[0] == 27 assert (hdul[0].data[1:] == data[1:]).all() def test_update_resized_header2(self, home_is_temp): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150 This is similar to test_update_resized_header, but specifically tests a case of multiple consecutive flush() calls on the same HDUList object, where each flush() requires a resize. """ data1 = np.arange(100) data2 = np.arange(100) + 100 phdu = fits.PrimaryHDU(data=data1) hdu = fits.ImageHDU(data=data2) phdu.writeto(self.temp("temp.fits")) with fits.open(self.temp("temp.fits"), mode="append") as hdul: hdul.append(hdu) with fits.open(self.temp("temp.fits"), mode="update") as hdul: idx = 1 while len(str(hdul[0].header)) <= 2880 * 2: hdul[0].header[f"TEST{idx}"] = idx idx += 1 hdul.flush() hdul.append(hdu) with fits.open(self.temp("temp.fits")) as hdul: assert (hdul[0].data == data1).all() assert hdul[1].header == hdu.header assert (hdul[1].data == data2).all() assert (hdul[2].data == data2).all() def test_hdul_fromstring(self): """ Test creating the HDUList structure in memory from a string containing an entire FITS file. This is similar to test_hdu_fromstring but for an entire multi-extension FITS file at once. """ # Tests HDUList.fromstring for all of Astropy's built in test files def test_fromstring(filename): with fits.open(filename) as hdul: orig_info = hdul.info(output=False) with open(filename, "rb") as f: dat = f.read() hdul2 = fits.HDUList.fromstring(dat) assert orig_info == hdul2.info(output=False) for idx in range(len(hdul)): assert hdul[idx].header == hdul2[idx].header if hdul[idx].data is None or hdul2[idx].data is None: assert hdul[idx].data == hdul2[idx].data elif hdul[idx].data.dtype.fields and hdul2[idx].data.dtype.fields: # Compare tables for n in hdul[idx].data.names: c1 = hdul[idx].data[n] c2 = hdul2[idx].data[n] assert (c1 == c2).all() elif any(dim == 0 for dim in hdul[idx].data.shape) or any( dim == 0 for dim in hdul2[idx].data.shape ): # For some reason some combinations of Python and Numpy # on Windows result in MemoryErrors when trying to work # on memmap arrays with more than one dimension but # some dimensions of size zero, so include a special # case for that return hdul[idx].data.shape == hdul2[idx].data.shape else: np.testing.assert_array_equal(hdul[idx].data, hdul2[idx].data) for filename in get_pkg_data_filenames("data", pattern="*.fits"): if sys.platform == "win32" and filename.endswith("zerowidth.fits"): # Running this test on this file causes a crash in some # versions of Numpy on Windows. 
See ticket: # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174 continue elif filename.endswith(("variable_length_table.fits", "theap-gap.fits")): # Comparing variable length arrays is non-trivial and thus # skipped at this point. # TODO: That's probably possible, so one could make it work. continue test_fromstring(filename) # Test that creating an HDUList from something silly raises a TypeError pytest.raises(TypeError, fits.HDUList.fromstring, ["a", "b", "c"]) @pytest.mark.filterwarnings("ignore:Saving a backup") def test_save_backup(self, home_is_temp): """Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121 Save backup of file before flushing changes. """ self.copy_file("scale.fits") with fits.open( self.temp("scale.fits"), mode="update", save_backup=True ) as hdul: # Make some changes to the original file to force its header # and data to be rewritten hdul[0].header["TEST"] = "TEST" # This emits warning that needs to be ignored at the # pytest.mark.filterwarnings level. hdul[0].data[0] = 0 assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak"))) with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul1: with fits.open( self.temp("scale.fits.bak"), do_not_scale_image_data=True ) as hdul2: assert hdul1[0].header == hdul2[0].header assert (hdul1[0].data == hdul2[0].data).all() with fits.open( self.temp("scale.fits"), mode="update", save_backup=True ) as hdul: # One more time to see if multiple backups are made hdul[0].header["TEST2"] = "TEST" hdul[0].data[0] = 1 assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak"))) assert os.path.exists(os.path.expanduser(self.temp("scale.fits.bak.1"))) def test_replace_mmap_data(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/25 Replacing the mmap'd data of one file with mmap'd data from a different file should work. """ arr_a = np.arange(10) arr_b = arr_a * 2 def test(mmap_a, mmap_b): hdu_a = fits.PrimaryHDU(data=arr_a) hdu_a.writeto(self.temp("test_a.fits"), overwrite=True) hdu_b = fits.PrimaryHDU(data=arr_b) hdu_b.writeto(self.temp("test_b.fits"), overwrite=True) with fits.open( self.temp("test_a.fits"), mode="update", memmap=mmap_a ) as hdul_a: with fits.open(self.temp("test_b.fits"), memmap=mmap_b) as hdul_b: hdul_a[0].data = hdul_b[0].data with fits.open(self.temp("test_a.fits")) as hdul_a: assert np.all(hdul_a[0].data == arr_b) test(True, True) # Repeat the same test but this time don't mmap A test(False, True) # Finally, without mmaping B test(True, False) def test_replace_mmap_data_2(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/25 Replacing the mmap'd data of one file with mmap'd data from a different file should work. Like test_replace_mmap_data but with table data instead of image data. 
""" arr_a = np.arange(10) arr_b = arr_a * 2 def test(mmap_a, mmap_b): col_a = fits.Column(name="a", format="J", array=arr_a) col_b = fits.Column(name="b", format="J", array=arr_b) hdu_a = fits.BinTableHDU.from_columns([col_a]) hdu_a.writeto(self.temp("test_a.fits"), overwrite=True) hdu_b = fits.BinTableHDU.from_columns([col_b]) hdu_b.writeto(self.temp("test_b.fits"), overwrite=True) with fits.open( self.temp("test_a.fits"), mode="update", memmap=mmap_a ) as hdul_a: with fits.open(self.temp("test_b.fits"), memmap=mmap_b) as hdul_b: hdul_a[1].data = hdul_b[1].data with fits.open(self.temp("test_a.fits")) as hdul_a: assert "b" in hdul_a[1].columns.names assert "a" not in hdul_a[1].columns.names assert np.all(hdul_a[1].data["b"] == arr_b) test(True, True) # Repeat the same test but this time don't mmap A test(False, True) # Finally, without mmaping B test(True, False) def test_extname_in_hdulist(self): """ Tests to make sure that the 'in' operator works. Regression test for https://github.com/astropy/astropy/issues/3060 """ with fits.open(self.data("o4sp040b0_raw.fits")) as hdulist: hdulist.append(fits.ImageHDU(name="a")) assert "a" in hdulist assert "A" in hdulist assert ("a", 1) in hdulist assert ("A", 1) in hdulist assert "b" not in hdulist assert ("a", 2) not in hdulist assert ("b", 1) not in hdulist assert ("b", 2) not in hdulist assert hdulist[0] in hdulist assert fits.ImageHDU() not in hdulist def test_overwrite(self, home_is_temp): hdulist = fits.HDUList([fits.PrimaryHDU()]) hdulist.writeto(self.temp("test_overwrite.fits")) with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): hdulist.writeto(self.temp("test_overwrite.fits"), overwrite=False) hdulist.writeto(self.temp("test_overwrite.fits"), overwrite=True) def test_invalid_hdu_key_in_contains(self): """ Make sure invalid keys in the 'in' operator return False. Regression test for https://github.com/astropy/astropy/issues/5583 """ hdulist = fits.HDUList(fits.PrimaryHDU()) hdulist.append(fits.ImageHDU()) hdulist.append(fits.ImageHDU()) # A more or less random assortment of things which are not valid keys. bad_keys = [None, 3.5, {}] for key in bad_keys: assert key not in hdulist def test_iteration_of_lazy_loaded_hdulist(self): """ Regression test for https://github.com/astropy/astropy/issues/5585 """ hdulist = fits.HDUList(fits.PrimaryHDU()) hdulist.append(fits.ImageHDU(name="SCI")) hdulist.append(fits.ImageHDU(name="SCI")) hdulist.append(fits.ImageHDU(name="nada")) hdulist.append(fits.ImageHDU(name="SCI")) filename = self.temp("many_extension.fits") hdulist.writeto(filename) f = fits.open(filename) # Check that all extensions are read if f is not sliced all_exts = [ext for ext in f] assert len(all_exts) == 5 # Reload the file to ensure we are still lazy loading f.close() f = fits.open(filename) # Try a simple slice with no conditional on the ext. This is essentially # the reported failure. all_exts_but_zero = [ext for ext in f[1:]] assert len(all_exts_but_zero) == 4 # Reload the file to ensure we are still lazy loading f.close() f = fits.open(filename) # Check whether behavior is proper if the upper end of the slice is not # omitted. 
read_exts = [ext for ext in f[1:4] if ext.header["EXTNAME"] == "SCI"] assert len(read_exts) == 2 f.close() def test_read_non_standard_hdu(self): filename = self.temp("bad-fits.fits") hdu = fits.PrimaryHDU() hdu.header["FOO"] = "BAR" buf = io.BytesIO() hdu.writeto(buf) buf.seek(0) hdustr = buf.read() hdustr = hdustr.replace( b"SIMPLE = T", b"SIMPLE = F" ) with open(filename, mode="wb") as f: f.write(hdustr) with fits.open(filename) as hdul: assert isinstance(hdul[0], _NonstandardHDU) assert hdul[0].header["FOO"] == "BAR" def test_proper_error_raised_on_non_fits_file(self): filename = self.temp("not-fits.fits") with open(filename, mode="w", encoding="utf=8") as f: f.write("Not a FITS file") match = ( "No SIMPLE card found, this file does not appear to be a valid FITS file" ) # This should raise an OSError because there is no end card. with pytest.raises(OSError, match=match): fits.open(filename) with pytest.raises(OSError, match=match): fits.open(filename, mode="append") with pytest.raises(OSError, match=match): fits.open(filename, mode="update") def test_proper_error_raised_on_invalid_fits_file(self): filename = self.temp("bad-fits.fits") hdu = fits.PrimaryHDU() hdu.header["FOO"] = "BAR" buf = io.BytesIO() hdu.writeto(buf) # write 80 additional bytes so the block will have the correct size buf.write(b" " * 80) buf.seek(0) buf.seek(80) # now remove the SIMPLE card with open(filename, mode="wb") as f: f.write(buf.read()) match = ( "No SIMPLE card found, this file does not appear to be a valid FITS file" ) # This should raise an OSError because there is no end card. with pytest.raises(OSError, match=match): fits.open(filename) with pytest.raises(OSError, match=match): fits.open(filename, mode="append") with pytest.raises(OSError, match=match): fits.open(filename, mode="update") with fits.open(filename, ignore_missing_simple=True) as hdul: assert isinstance(hdul[0], _ValidHDU) assert hdul[0].header["FOO"] == "BAR" def test_warning_raised_on_non_standard_simple_card(self): filename = self.temp("bad-fits.fits") hdu = fits.PrimaryHDU() hdu.header["FOO"] = "BAR" buf = io.BytesIO() hdu.writeto(buf) # change the simple card format buf.seek(0) buf.write(b"SIMPLE = T ") buf.seek(0) with open(filename, mode="wb") as f: f.write(buf.read()) match = "Found a SIMPLE card but its format doesn't respect the FITS Standard" with pytest.warns(VerifyWarning, match=match): fits.open(filename) with pytest.warns(VerifyWarning, match=match): fits.open(filename, mode="append") with pytest.warns(VerifyWarning, match=match): fits.open(filename, mode="update") with fits.open(filename, ignore_missing_simple=True) as hdul: assert isinstance(hdul[0], _ValidHDU) assert hdul[0].header["FOO"] == "BAR" # change the simple card format buf.seek(0) buf.write(b"SIMPLE = T / This is a FITS file") buf.seek(0) with open(filename, mode="wb") as f: f.write(buf.read()) with pytest.warns(VerifyWarning, match=match): fits.open(filename) def test_proper_error_raised_on_non_fits_file_with_unicode(self): """ Regression test for https://github.com/astropy/astropy/issues/5594 The failure shows up when (in python 3+) you try to open a file with unicode content that is not actually a FITS file. See: https://github.com/astropy/astropy/issues/5594#issuecomment-266583218 """ filename = self.temp("not-fits-with-unicode.fits") with open(filename, mode="w", encoding="utf=8") as f: f.write("Ce\xe7i ne marche pas") # This should raise an OSError because there is no end card. 
with pytest.raises( OSError, match=( "No SIMPLE card found, this file " "does not appear to be a valid FITS file" ), ): fits.open(filename) def test_no_resource_warning_raised_on_non_fits_file(self): """ Regression test for https://github.com/astropy/astropy/issues/6168 The ResourceWarning shows up when (in python 3+) you try to open a non-FITS file when using a filename. """ # To avoid creating the file multiple times the tests are # all included in one test file. See the discussion to the # PR at https://github.com/astropy/astropy/issues/6168 # filename = self.temp("not-fits.fits") with open(filename, mode="w") as f: f.write("# header line\n") f.write("0.1 0.2\n") # Opening the file should raise an OSError however the file # is opened (there are two distinct code paths, depending on # whether ignore_missing_end is True or False). # # Explicit tests are added to make sure the file handle is not # closed when passed in to fits.open. In this case the ResourceWarning # was not raised. # Make sure that files opened by the user are not closed with open(filename, mode="rb") as f: with pytest.raises(OSError): fits.open(f, ignore_missing_end=False) assert not f.closed with open(filename, mode="rb") as f: with pytest.raises(OSError), pytest.warns(VerifyWarning): fits.open(f, ignore_missing_end=True) assert not f.closed with pytest.raises(OSError): fits.open(filename, ignore_missing_end=False) with pytest.raises(OSError), pytest.warns(VerifyWarning): fits.open(filename, ignore_missing_end=True) def test_pop_with_lazy_load(self): filename = self.data("checksum.fits") with fits.open(filename) as hdul: # Try popping the hdulist before doing anything else. This makes sure # that https://github.com/astropy/astropy/issues/7185 is fixed. hdu = hdul.pop() assert len(hdul) == 1 # Read the file again and try popping from the beginning with fits.open(filename) as hdul2: hdu2 = hdul2.pop(0) assert len(hdul2) == 1 # Just a sanity check with fits.open(filename) as hdul3: assert len(hdul3) == 2 assert hdul3[0].header == hdu2.header assert hdul3[1].header == hdu.header def test_pop_extname(self): with fits.open(self.data("o4sp040b0_raw.fits")) as hdul: assert len(hdul) == 7 hdu1 = hdul[1] hdu4 = hdul[4] hdu_popped = hdul.pop(("SCI", 2)) assert len(hdul) == 6 assert hdu_popped is hdu4 hdu_popped = hdul.pop("SCI") assert len(hdul) == 5 assert hdu_popped is hdu1 # Skip due to https://github.com/astropy/astropy/issues/8916 @pytest.mark.skipif( sys.platform.startswith("win32"), reason="Cannot test on Windows" ) def test_write_hdulist_to_stream(self): """ Unit test for https://github.com/astropy/astropy/issues/7435 to ensure that an HDUList can be written to a stream. 
""" data = np.array([[1, 2, 3], [4, 5, 6]]) hdu = fits.PrimaryHDU(data) hdulist = fits.HDUList([hdu]) with open(self.temp("test.fits"), "wb") as fout: with subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=fout) as p: hdulist.writeto(p.stdin) def test_output_verify(self): hdul = fits.HDUList([fits.PrimaryHDU()]) hdul[0].header["FOOBAR"] = 42 hdul.writeto(self.temp("test.fits")) with open(self.temp("test.fits"), "rb") as f: data = f.read() # create invalid card data = data.replace(b"FOOBAR =", b"FOOBAR = ") with open(self.temp("test2.fits"), "wb") as f: f.write(data) with pytest.raises(VerifyError): with fits.open(self.temp("test2.fits"), mode="update") as hdul: hdul[0].header["MORE"] = "here" with pytest.warns(VerifyWarning) as ww: with fits.open( self.temp("test2.fits"), mode="update", output_verify="fix+warn" ) as hdul: hdul[0].header["MORE"] = "here" assert len(ww) == 6 msg = "Card 'FOOBAR ' is not FITS standard (equal sign not at column 8)" assert msg in str(ww[3].message)
0e46e72ffdb2c8a33add9cc05da5ac4a077cd67310402e1f68ff7d1811a41a2e
# Licensed under a 3-clause BSD style license - see PYFITS.rst import os import pathlib import shutil import stat import tempfile import time import pytest from astropy.io import fits @pytest.fixture( params=[False, "str", "pathlib"], ids=["", "home_is_data", "home_is_data, pathlib"] ) def home_is_data(request, monkeypatch): """ Pytest fixture to run a test case both with and without tilde paths. In the tilde-path case, calls like self.data('filename.fits') will produce '~/filename.fits', and environment variables will be temporarily modified so that '~' resolves to the data directory. """ # This checks the value specified in the fixture annotation if request.param: # `request.instance` refers to the test case that's using this fixture. request.instance.monkeypatch = monkeypatch request.instance.set_home_as_data() request.instance.set_paths_via_pathlib(request.param == "pathlib") @pytest.fixture( params=[False, "str", "pathlib"], ids=["", "home_is_data", "home_is_data, pathlib"] ) def home_is_temp(request, monkeypatch): """ Pytest fixture to run a test case both with and without tilde paths. In the tilde-path case, calls like self.temp('filename.fits') will produce '~/filename.fits', and environment variables will be temporarily modified so that '~' resolves to the temp directory. These files will also be tracked so that, after the test case, we can verify no files were written to a literal tilde path. """ # This checks the value specified in the fixture annotation if request.param: # `request.instance` refers to the test case that's using this fixture. request.instance.monkeypatch = monkeypatch request.instance.set_home_as_temp() request.instance.set_paths_via_pathlib(request.param == "pathlib") class FitsTestCase: def setup_method(self): self.data_dir = os.path.join(os.path.dirname(__file__), "data") self.temp_dir = tempfile.mkdtemp(prefix="fits-test-") self.home_is_data = False self.home_is_temp = False self.temp_files_used = set() self.use_pathlib = False # Restore global settings to defaults # TODO: Replace this when there's a better way to in the config API to # force config values to their defaults fits.conf.enable_record_valued_keyword_cards = True fits.conf.extension_name_case_sensitive = False fits.conf.strip_header_whitespace = True fits.conf.use_memmap = True def teardown_method(self): if self.home_is_temp: # Verify that no files were written to a literal tilde path for temp_file, temp_file_no_tilde in self.temp_files_used: assert not os.path.exists(temp_file) assert os.path.exists(temp_file_no_tilde) if hasattr(self, "temp_dir") and os.path.exists(self.temp_dir): tries = 3 while tries: try: shutil.rmtree(self.temp_dir) break except OSError: # Probably couldn't delete the file because for whatever # reason a handle to it is still open/hasn't been # garbage-collected time.sleep(0.5) tries -= 1 fits.conf.reset("enable_record_valued_keyword_cards") fits.conf.reset("extension_name_case_sensitive") fits.conf.reset("strip_header_whitespace") fits.conf.reset("use_memmap") def copy_file(self, filename): """Copies a backup of a test data file to the temp dir and sets its mode to writeable. 
""" shutil.copy( os.path.expanduser(self.data(filename)), os.path.expanduser(self.temp(filename)), ) os.chmod(os.path.expanduser(self.temp(filename)), stat.S_IREAD | stat.S_IWRITE) def data(self, filename): """Returns the path to a test data file.""" if self.home_is_data: prefix = "~" else: prefix = self.data_dir if self.use_pathlib: return pathlib.Path(prefix, filename) return os.path.join(prefix, filename) def temp(self, filename): """Returns the full path to a file in the test temp dir.""" real_target = os.path.join(self.temp_dir, filename) if self.home_is_temp: prefix = "~" # Record the '~' path and the intended path, for use # in `home_is_temp` self.temp_files_used.add((os.path.join(prefix, filename), real_target)) else: prefix = self.temp_dir if self.use_pathlib: return pathlib.Path(prefix, filename) return os.path.join(prefix, filename) def set_home_as_data(self): """ This overrides the HOME environment variable, so that paths beginning with '~/' expand to the data directory. Used by the `home_is_data` fixture. """ self.home_is_data = True # For Unix self.monkeypatch.setenv("HOME", self.data_dir) # For Windows self.monkeypatch.setenv("USERPROFILE", self.data_dir) def set_home_as_temp(self): """ This overrides the HOME environment variable, so that paths beginning with '~/' expand to the temp directory. In conjunction with self.temp(), temporary files are tracked as they are created, so we can verify they end up in the temporary directory and not unexpected places in the filesystem. Used by the `home_is_temp` fixture. """ self.home_is_temp = True # For Unix self.monkeypatch.setenv("HOME", self.temp_dir) # For Windows self.monkeypatch.setenv("USERPROFILE", self.temp_dir) def set_paths_via_pathlib(self, use_pathlib): self.use_pathlib = use_pathlib
6b3ff616f7f875ee7b466d5f231bfbcae2032a4828800992cd7515a8f910a2fa
# Licensed under a 3-clause BSD style license - see PYFITS.rst import collections import copy import warnings from io import BytesIO, StringIO import numpy as np import pytest from astropy.io import fits from astropy.io.fits.card import _pad from astropy.io.fits.header import _pad_length from astropy.io.fits.util import encode_ascii from astropy.io.fits.verify import VerifyError, VerifyWarning from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH from .conftest import FitsTestCase def test_shallow_copy(): """Make sure that operations on a shallow copy do not alter the original. #4990.""" original_header = fits.Header([("a", 1), ("b", 1)]) copied_header = copy.copy(original_header) # Modifying the original dict should not alter the copy original_header["c"] = 100 assert "c" not in copied_header # and changing the copy should not change the original. copied_header["a"] = 0 assert original_header["a"] == 1 def test_init_with_header(): """Make sure that creating a Header from another Header makes a copy if copy is True.""" original_header = fits.Header([("a", 10)]) new_header = fits.Header(original_header, copy=True) original_header["a"] = 20 assert new_header["a"] == 10 new_header["a"] = 0 assert original_header["a"] == 20 def test_init_with_dict(): dict1 = {"a": 11, "b": 12, "c": 13, "d": 14, "e": 15} h1 = fits.Header(dict1) for i in dict1: assert dict1[i] == h1[i] def test_init_with_ordereddict(): # Create a list of tuples. Each tuple consisting of a letter and the number list1 = [(i, j) for j, i in enumerate("abcdefghijklmnopqrstuvwxyz")] # Create an ordered dictionary and a header from this dictionary dict1 = collections.OrderedDict(list1) h1 = fits.Header(dict1) # Check that the order is preserved of the initial list assert all(h1[val] == list1[i][1] for i, val in enumerate(h1)) class TestHeaderFunctions(FitsTestCase): """Test Header and Card objects.""" def test_rename_keyword(self): """Test renaming keyword with rename_keyword.""" header = fits.Header([("A", "B", "C"), ("D", "E", "F")]) header.rename_keyword("A", "B") assert "A" not in header assert "B" in header assert header[0] == "B" assert header["B"] == "B" assert header.comments["B"] == "C" @pytest.mark.parametrize("key", ["A", "a"]) def test_indexing_case(self, key): """Check that indexing is case insensitive""" header = fits.Header([("A", "B", "C"), ("D", "E", "F")]) assert key in header assert header[key] == "B" assert header.get(key) == "B" assert header.index(key) == 0 assert header.comments[key] == "C" assert header.count(key) == 1 header.remove(key, ignore_missing=False) def test_card_constructor_default_args(self): """Test Card constructor with default argument values.""" c = fits.Card() assert c.keyword == "" def test_card_from_bytes(self): """ Test loading a Card from a `bytes` object (assuming latin-1 encoding). 
""" c = fits.Card.fromstring(b"ABC = 'abc'") assert c.keyword == "ABC" assert c.value == "abc" def test_string_value_card(self): """Test Card constructor with string value""" c = fits.Card("abc", "<8 ch") assert str(c) == _pad("ABC = '<8 ch '") c = fits.Card("nullstr", "") assert str(c) == _pad("NULLSTR = ''") def test_boolean_value_card(self): """Test Card constructor with boolean value""" c = fits.Card("abc", True) assert str(c) == _pad("ABC = T") c = fits.Card.fromstring("ABC = F") assert c.value is False def test_long_integer_value_card(self): """Test Card constructor with long integer value""" c = fits.Card("long_int", -467374636747637647347374734737437) assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437") def test_floating_point_value_card(self): """Test Card constructor with floating point value""" c = fits.Card("floatnum", -467374636747637647347374734737437.0) if str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and str(c) != _pad( "FLOATNUM= -4.6737463674763E+032" ): assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32") def test_complex_value_card(self): """Test Card constructor with complex value""" c = fits.Card("abc", (1.2345377437887837487e88 + 6324767364763746367e-33j)) f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)") f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)") f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)") if str(c) != f1 and str(c) != f2: assert str(c) == f3 def test_card_image_constructed_too_long(self): """Test that over-long cards truncate the comment""" # card image constructed from key/value/comment is too long # (non-string value) c = fits.Card("abc", 9, "abcde" * 20) with pytest.warns(fits.verify.VerifyWarning): assert ( str(c) == "ABC = 9 " "/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab" ) c = fits.Card("abc", "a" * 68, "abcdefg") with pytest.warns(fits.verify.VerifyWarning): assert str(c) == f"ABC = '{'a' * 68}'" def test_constructor_filter_illegal_data_structures(self): """Test that Card constructor raises exceptions on bad arguments""" pytest.raises(ValueError, fits.Card, ("abc",), {"value": (2, 3)}) pytest.raises(ValueError, fits.Card, "key", [], "comment") def test_keyword_too_long(self): """Test that long Card keywords are allowed, but with a warning""" pytest.warns(UserWarning, fits.Card, "abcdefghi", "long") def test_illegal_characters_in_key(self): """ Test that Card constructor allows illegal characters in the keyword, but creates a HIERARCH card. """ # This test used to check that a ValueError was raised, because a # keyword like 'abc+' was simply not allowed. Now it should create a # HIERARCH card. with pytest.warns(AstropyUserWarning) as w: c = fits.Card("abc+", 9) assert len(w) == 1 assert c.image == _pad("HIERARCH abc+ = 9") def test_add_history(self): header = fits.Header( [ ("A", "B", "C"), ("HISTORY", 1), ("HISTORY", 2), ("HISTORY", 3), ("", "", ""), ("", "", ""), ] ) header.add_history(4) # One of the blanks should get used, so the length shouldn't change assert len(header) == 6 assert header.cards[4].value == 4 assert header["HISTORY"] == [1, 2, 3, 4] assert repr(header["HISTORY"]) == "1\n2\n3\n4" header.add_history(0, after="A") assert len(header) == 6 assert header.cards[1].value == 0 assert header["HISTORY"] == [0, 1, 2, 3, 4] def test_add_blank(self): header = fits.Header( [("A", "B", "C"), ("", 1), ("", 2), ("", 3), ("", "", ""), ("", "", "")] ) header.add_blank(4) # This time a new blank should be added, and the existing blanks don't # get used... 
(though this is really kinda sketchy--there's a # distinction between truly blank cards, and cards with blank keywords # that isn't currently made int he code) assert len(header) == 7 assert header.cards[6].value == 4 assert header[""] == [1, 2, 3, "", "", 4] assert repr(header[""]) == "1\n2\n3\n\n\n4" header.add_blank(0, after="A") assert len(header) == 8 assert header.cards[1].value == 0 assert header[""] == [0, 1, 2, 3, "", "", 4] header[""] = 5 header[" "] = 6 assert header[""] == [0, 1, 2, 3, "", "", 4, 5, 6] assert header[" "] == [0, 1, 2, 3, "", "", 4, 5, 6] def test_update(self): class FakeHeader(list): def keys(self): return [l[0] for l in self] def __getitem__(self, key): return next(l[1:] for l in self if l[0] == key) header = fits.Header() header.update({"FOO": ("BAR", "BAZ")}) header.update(FakeHeader([("A", 1), ("B", 2, "comment")])) assert set(header.keys()) == {"FOO", "A", "B"} assert header.comments["B"] == "comment" # test that comments are preserved tmphdr = fits.Header() tmphdr["HELLO"] = (1, "this is a comment") header.update(tmphdr) assert set(header.keys()) == {"FOO", "A", "B", "HELLO"} assert header.comments["HELLO"] == "this is a comment" header.update(NAXIS1=100, NAXIS2=100) assert set(header.keys()) == {"FOO", "A", "B", "HELLO", "NAXIS1", "NAXIS2"} assert set(header.values()) == {"BAR", 1, 2, 100, 100} def test_update_comment(self): hdul = fits.open(self.data("arange.fits")) hdul[0].header.update({"FOO": ("BAR", "BAZ")}) assert hdul[0].header["FOO"] == "BAR" assert hdul[0].header.comments["FOO"] == "BAZ" with pytest.raises(ValueError): hdul[0].header.update({"FOO2": ("BAR", "BAZ", "EXTRA")}) hdul.writeto(self.temp("test.fits")) hdul.close() hdul = fits.open(self.temp("test.fits"), mode="update") hdul[0].header.comments["FOO"] = "QUX" hdul.close() hdul = fits.open(self.temp("test.fits")) assert hdul[0].header.comments["FOO"] == "QUX" hdul[0].header.add_comment(0, after="FOO") assert str(hdul[0].header.cards[-1]).strip() == "COMMENT 0" hdul.close() def test_commentary_cards(self): # commentary cards val = "A commentary card's value has no quotes around it." c = fits.Card("HISTORY", val) assert str(c) == _pad("HISTORY " + val) val = "A commentary card has no comment." c = fits.Card("COMMENT", val, "comment") assert str(c) == _pad("COMMENT " + val) def test_commentary_card_created_by_fromstring(self): # commentary card created by fromstring() c = fits.Card.fromstring( "COMMENT card has no comments. " "/ text after slash is still part of the value." ) assert ( c.value == "card has no comments. " "/ text after slash is still part of the value." 
) assert c.comment == "" def test_commentary_card_will_not_parse_numerical_value(self): # commentary card will not parse the numerical value c = fits.Card.fromstring("HISTORY (1, 2)") assert str(c) == _pad("HISTORY (1, 2)") def test_equal_sign_after_column8(self): # equal sign after column 8 of a commentary card will be part of the # string value c = fits.Card.fromstring("HISTORY = (1, 2)") assert str(c) == _pad("HISTORY = (1, 2)") def test_blank_keyword(self): c = fits.Card("", " / EXPOSURE INFORMATION") assert str(c) == _pad(" / EXPOSURE INFORMATION") c = fits.Card.fromstring(str(c)) assert c.keyword == "" assert c.value == " / EXPOSURE INFORMATION" def test_specify_undefined_value(self): # this is how to specify an undefined value c = fits.Card("undef", fits.card.UNDEFINED) assert str(c) == _pad("UNDEF =") def test_complex_number_using_string_input(self): # complex number using string input c = fits.Card.fromstring("ABC = (8, 9)") assert str(c) == _pad("ABC = (8, 9)") def test_fixable_non_standard_fits_card(self, capsys): # fixable non-standard FITS card will keep the original format c = fits.Card.fromstring("abc = + 2.1 e + 12") assert c.value == 2100000000000.0 with pytest.warns( fits.verify.VerifyWarning, match=r"Verification reported errors" ): assert str(c) == _pad("ABC = +2.1E+12") def test_fixable_non_fsc(self): # fixable non-FSC: if the card is not parsable, it's value will be # assumed # to be a string and everything after the first slash will be comment c = fits.Card.fromstring( "no_quote= this card's value has no quotes / let's also try the comment" ) with pytest.warns( fits.verify.VerifyWarning, match=r"Verification reported errors" ): assert ( str(c) == "NO_QUOTE= 'this card''s value has no quotes' " "/ let's also try the comment " ) def test_undefined_value_using_string_input(self): # undefined value using string input c = fits.Card.fromstring("ABC = ") assert str(c) == _pad("ABC =") def test_undefined_keys_values(self): header = fits.Header() header["FOO"] = "BAR" header["UNDEF"] = None assert list(header.values()) == ["BAR", None] assert list(header.items()) == [("FOO", "BAR"), ("UNDEF", None)] def test_mislocated_equal_sign(self, capsys): # test mislocated "=" sign c = fits.Card.fromstring("XYZ= 100") assert c.keyword == "XYZ" assert c.value == 100 with pytest.warns( fits.verify.VerifyWarning, match=r"Verification reported errors" ): assert str(c) == _pad("XYZ = 100") def test_equal_only_up_to_column_10(self, capsys): # the test of "=" location is only up to column 10 # This test used to check if Astropy rewrote this card to a new format, # something like "HISTO = '= (1, 2)". But since ticket #109 if the # format is completely wrong we don't make any assumptions and the card # should be left alone c = fits.Card.fromstring("HISTO = (1, 2)") with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"): assert str(c) == _pad("HISTO = (1, 2)") # Likewise this card should just be left in its original form and # we shouldn't guess how to parse it or rewrite it. 
c = fits.Card.fromstring(" HISTORY (1, 2)") with pytest.warns(AstropyUserWarning, match=r"header keyword is invalid"): assert str(c) == _pad(" HISTORY (1, 2)") def test_verify_invalid_equal_sign(self): # verification c = fits.Card.fromstring("ABC= a6") with pytest.warns(AstropyUserWarning) as w: c.verify() err_text1 = "Card 'ABC' is not FITS standard (equal sign not at column 8)" err_text2 = "Card 'ABC' is not FITS standard (invalid value string: 'a6'" assert len(w) == 4 assert err_text1 in str(w[1].message) assert err_text2 in str(w[2].message) def test_fix_invalid_equal_sign(self): fix_text = "Fixed 'ABC' card to meet the FITS standard." c = fits.Card.fromstring("ABC= a6") with pytest.warns(AstropyUserWarning, match=fix_text) as w: c.verify("fix") assert len(w) == 4 assert str(c) == _pad("ABC = 'a6 '") def test_long_string_value(self): # test long string value c = fits.Card("abc", "long string value " * 10, "long comment " * 10) assert ( str(c) == "ABC = 'long string value long string value long string value long string &' " "CONTINUE 'value long string value long string value long string value long &' " "CONTINUE 'string value long string value long string value &' " "CONTINUE '&' / long comment long comment long comment long comment long " "CONTINUE '&' / comment long comment long comment long comment long comment " "CONTINUE '' / long comment " ) def test_long_string_value_with_multiple_long_words(self): """ Regression test for https://github.com/astropy/astropy/issues/11298 """ c = fits.Card( "WHATEVER", "SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_" "03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY" "_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml", ) assert ( str(c) == "WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'" "CONTINUE '.h5 &' " "CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'" "CONTINUE 'xml' " ) def test_long_unicode_string(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/1 So long as a unicode string can be converted to ASCII it should have no different behavior in this regard from a byte string. """ h1 = fits.Header() h1["TEST"] = "abcdefg" * 30 h2 = fits.Header() h2["TEST"] = "abcdefg" * 30 assert str(h1) == str(h2) def test_long_string_repr(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193 Ensure that the __repr__() for cards represented with CONTINUE cards is split across multiple lines (broken at each *physical* card). 
""" header = fits.Header() header["TEST1"] = ("Regular value", "Regular comment") header["TEST2"] = ("long string value " * 10, "long comment " * 10) header["TEST3"] = ("Regular value", "Regular comment") assert repr(header).splitlines() == [ str(fits.Card("TEST1", "Regular value", "Regular comment")), "TEST2 = 'long string value long string value long string value long string &' ", "CONTINUE 'value long string value long string value long string value long &' ", "CONTINUE 'string value long string value long string value &' ", "CONTINUE '&' / long comment long comment long comment long comment long ", "CONTINUE '&' / comment long comment long comment long comment long comment ", "CONTINUE '' / long comment ", str(fits.Card("TEST3", "Regular value", "Regular comment")), ] def test_blank_keyword_long_value(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194 Test that a blank keyword ('') can be assigned a too-long value that is continued across multiple cards with blank keywords, just like COMMENT and HISTORY cards. """ value = "long string value " * 10 header = fits.Header() header[""] = value assert len(header) == 3 assert " ".join(header[""]) == value.rstrip() # Ensure that this works like other commentary keywords header["COMMENT"] = value header["HISTORY"] = value assert header["COMMENT"] == header["HISTORY"] assert header["COMMENT"] == header[""] def test_long_string_from_file(self): c = fits.Card("abc", "long string value " * 10, "long comment " * 10) hdu = fits.PrimaryHDU() hdu.header.append(c) hdu.writeto(self.temp("test_new.fits")) hdul = fits.open(self.temp("test_new.fits")) c = hdul[0].header.cards["abc"] hdul.close() assert ( str(c) == "ABC = 'long string value long string value long string value long string &' " "CONTINUE 'value long string value long string value long string value long &' " "CONTINUE 'string value long string value long string value &' " "CONTINUE '&' / long comment long comment long comment long comment long " "CONTINUE '&' / comment long comment long comment long comment long comment " "CONTINUE '' / long comment " ) def test_word_in_long_string_too_long(self): # if a word in a long string is too long, it will be cut in the middle c = fits.Card("abc", "longstringvalue" * 10, "longcomment" * 10) assert ( str(c) == "ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'" "CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'" "CONTINUE 'elongstringvalue&' " "CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme" "CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment " ) def test_long_string_value_via_fromstring(self, capsys): # long string value via fromstring() method c = fits.Card.fromstring( _pad("abc = 'longstring''s testing & ' / comments in line 1") + _pad( "continue 'continue with long string but without the " "ampersand at the end' /" ) + _pad( "continue 'continue must have string value (with quotes)' " "/ comments with ''. " ) ) with pytest.warns( fits.verify.VerifyWarning, match=r"Verification reported errors" ): assert ( str(c) == "ABC = 'longstring''s testing continue with long string but without the &' " "CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' " "CONTINUE '' / comments in line 1 comments with ''. 
" ) def test_continue_card_with_equals_in_value(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117 """ c = fits.Card.fromstring( _pad( "EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'" ) + _pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'") + _pad("CONTINUE '&' / pysyn expression") ) assert c.keyword == "EXPR" assert ( c.value == "/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits " "* 5.87359e-12 * MWAvg(Av=0.12)" ) assert c.comment == "pysyn expression" def test_final_continue_card_lacks_ampersand(self): """ Regression test for https://github.com/astropy/astropy/issues/3282 """ h = fits.Header() h["SVALUE"] = "A" * 69 assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'") def test_final_continue_card_ampersand_removal_on_long_comments(self): """ Regression test for https://github.com/astropy/astropy/issues/3282 """ c = fits.Card("TEST", "long value" * 10, "long comment &" * 10) assert ( str(c) == "TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' " "CONTINUE 'valuelong valuelong valuelong value&' " "CONTINUE '&' / long comment &long comment &long comment &long comment &long " "CONTINUE '&' / comment &long comment &long comment &long comment &long comment " "CONTINUE '' / &long comment & " ) def test_hierarch_card_creation(self): # Test automatic upgrade to hierarch card with pytest.warns( AstropyUserWarning, match="HIERARCH card will be created" ) as w: c = fits.Card( "ESO INS SLIT2 Y1FRML", "ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)", ) assert len(w) == 1 assert ( str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= " "'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'" ) # Test manual creation of hierarch card c = fits.Card("hierarch abcdefghi", 10) assert str(c) == _pad("HIERARCH abcdefghi = 10") c = fits.Card( "HIERARCH ESO INS SLIT2 Y1FRML", "ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)", ) assert ( str(c) == "HIERARCH ESO INS SLIT2 Y1FRML= " "'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'" ) def test_hierarch_with_abbrev_value_indicator(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/5 """ c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'") assert c.keyword == "key.META_4" assert c.value == "calFileVersion" assert c.comment == "" def test_hierarch_not_warn(self): """Check that compressed image headers do not issue HIERARCH warnings.""" filename = fits.util.get_testdata_filepath("compressed_image.fits") with fits.open(filename) as hdul: header = hdul[1].header with warnings.catch_warnings(record=True) as warning_list: header["HIERARCH LONG KEYWORD"] = 42 assert len(warning_list) == 0 assert header["LONG KEYWORD"] == 42 assert header["HIERARCH LONG KEYWORD"] == 42 # Check that it still warns if we do not use HIERARCH with pytest.warns( fits.verify.VerifyWarning, match=r"greater than 8 characters" ): header["LONG KEYWORD2"] = 1 assert header["LONG KEYWORD2"] == 1 def test_hierarch_keyword_whitespace(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/6 Make sure any leading or trailing whitespace around HIERARCH keywords is stripped from the actual keyword value. 
""" c = fits.Card.fromstring("HIERARCH key.META_4 = 'calFileVersion'") assert c.keyword == "key.META_4" assert c.value == "calFileVersion" assert c.comment == "" # Test also with creation via the Card constructor c = fits.Card("HIERARCH key.META_4", "calFileVersion") assert c.keyword == "key.META_4" assert c.value == "calFileVersion" assert c.comment == "" def test_verify_mixed_case_hierarch(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/7 Assures that HIERARCH keywords with lower-case characters and other normally invalid keyword characters are not considered invalid. """ c = fits.Card("HIERARCH WeirdCard.~!@#_^$%&", "The value", "a comment") # This should not raise any exceptions c.verify("exception") assert c.keyword == "WeirdCard.~!@#_^$%&" assert c.value == "The value" assert c.comment == "a comment" # Test also the specific case from the original bug report header = fits.Header( [ ("simple", True), ("BITPIX", 8), ("NAXIS", 0), ("EXTEND", True, "May contain datasets"), ("HIERARCH key.META_0", "detRow"), ] ) hdu = fits.PrimaryHDU(header=header) hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul: header2 = hdul[0].header assert str(header.cards[header.index("key.META_0")]) == str( header2.cards[header2.index("key.META_0")] ) def test_missing_keyword(self): """Test that accessing a non-existent keyword raises a KeyError.""" header = fits.Header() # De-referencing header through the inline function should behave # identically to accessing it in the pytest.raises context below. pytest.raises(KeyError, lambda k: header[k], "NAXIS") # Test exception with message with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."): header["NAXIS"] def test_hierarch_card_lookup(self): header = fits.Header() header["hierarch abcdefghi"] = 10 assert "abcdefghi" in header assert header["abcdefghi"] == 10 # This used to be assert_false, but per ticket # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords # should be treated case-insensitively when performing lookups assert "ABCDEFGHI" in header def test_hierarch_card_delete(self): header = fits.Header() header["hierarch abcdefghi"] = 10 del header["hierarch abcdefghi"] def test_hierarch_card_insert_delete(self): header = fits.Header() with pytest.warns( fits.verify.VerifyWarning, match=r"greater than 8 characters" ): header["abcdefghi"] = 10 header["abcdefgh"] = 10 header["abcdefg"] = 10 with pytest.warns( fits.verify.VerifyWarning, match=r"greater than 8 characters" ): header.insert(2, ("abcdefghij", 10)) del header["abcdefghij"] with pytest.warns( fits.verify.VerifyWarning, match=r"greater than 8 characters" ): header.insert(2, ("abcdefghij", 10)) del header[2] assert list(header.keys())[2] == "abcdefg".upper() def test_hierarch_create_and_update(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158 Tests several additional use cases for working with HIERARCH cards. 
""" msg = "a HIERARCH card will be created" header = fits.Header() with pytest.warns(VerifyWarning) as w: header.update({"HIERARCH BLAH BLAH": "TESTA"}) assert len(w) == 0 assert "BLAH BLAH" in header assert header["BLAH BLAH"] == "TESTA" header.update({"HIERARCH BLAH BLAH": "TESTB"}) assert len(w) == 0 assert header["BLAH BLAH"], "TESTB" # Update without explicitly stating 'HIERARCH': header.update({"BLAH BLAH": "TESTC"}) assert len(w) == 1 assert len(header) == 1 assert header["BLAH BLAH"], "TESTC" # Test case-insensitivity header.update({"HIERARCH blah blah": "TESTD"}) assert len(w) == 1 assert len(header) == 1 assert header["blah blah"], "TESTD" header.update({"blah blah": "TESTE"}) assert len(w) == 2 assert len(header) == 1 assert header["blah blah"], "TESTE" # Create a HIERARCH card > 8 characters without explicitly stating # 'HIERARCH' header.update({"BLAH BLAH BLAH": "TESTA"}) assert len(w) == 3 assert msg in str(w[0].message) header.update({"HIERARCH BLAH BLAH BLAH": "TESTB"}) assert len(w) == 3 assert header["BLAH BLAH BLAH"], "TESTB" # Update without explicitly stating 'HIERARCH': header.update({"BLAH BLAH BLAH": "TESTC"}) assert len(w) == 4 assert header["BLAH BLAH BLAH"], "TESTC" # Test case-insensitivity header.update({"HIERARCH blah blah blah": "TESTD"}) assert len(w) == 4 assert header["blah blah blah"], "TESTD" header.update({"blah blah blah": "TESTE"}) assert len(w) == 5 assert header["blah blah blah"], "TESTE" def test_short_hierarch_create_and_update(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158 Tests several additional use cases for working with HIERARCH cards, specifically where the keyword is fewer than 8 characters, but contains invalid characters such that it can only be created as a HIERARCH card. 
""" msg = "a HIERARCH card will be created" header = fits.Header() with pytest.warns(VerifyWarning) as w: header.update({"HIERARCH BLA BLA": "TESTA"}) assert len(w) == 0 assert "BLA BLA" in header assert header["BLA BLA"] == "TESTA" header.update({"HIERARCH BLA BLA": "TESTB"}) assert len(w) == 0 assert header["BLA BLA"], "TESTB" # Update without explicitly stating 'HIERARCH': header.update({"BLA BLA": "TESTC"}) assert len(w) == 1 assert header["BLA BLA"], "TESTC" # Test case-insensitivity header.update({"HIERARCH bla bla": "TESTD"}) assert len(w) == 1 assert len(header) == 1 assert header["bla bla"], "TESTD" header.update({"bla bla": "TESTE"}) assert len(w) == 2 assert len(header) == 1 assert header["bla bla"], "TESTE" header = fits.Header() with pytest.warns(VerifyWarning) as w: # Create a HIERARCH card containing invalid characters without # explicitly stating 'HIERARCH' header.update({"BLA BLA": "TESTA"}) print([x.category for x in w]) assert len(w) == 1 assert msg in str(w[0].message) header.update({"HIERARCH BLA BLA": "TESTB"}) assert len(w) == 1 assert header["BLA BLA"], "TESTB" # Update without explicitly stating 'HIERARCH': header.update({"BLA BLA": "TESTC"}) assert len(w) == 2 assert header["BLA BLA"], "TESTC" # Test case-insensitivity header.update({"HIERARCH bla bla": "TESTD"}) assert len(w) == 2 assert len(header) == 1 assert header["bla bla"], "TESTD" header.update({"bla bla": "TESTE"}) assert len(w) == 3 assert len(header) == 1 assert header["bla bla"], "TESTE" def test_header_setitem_invalid(self): header = fits.Header() def test(): header["FOO"] = ("bar", "baz", "qux") pytest.raises(ValueError, test) def test_header_setitem_1tuple(self): header = fits.Header() header["FOO"] = ("BAR",) header["FOO2"] = (None,) assert header["FOO"] == "BAR" assert header["FOO2"] is None assert header[0] == "BAR" assert header.comments[0] == "" assert header.comments["FOO"] == "" def test_header_setitem_2tuple(self): header = fits.Header() header["FOO"] = ("BAR", "BAZ") header["FOO2"] = (None, None) assert header["FOO"] == "BAR" assert header["FOO2"] is None assert header[0] == "BAR" assert header.comments[0] == "BAZ" assert header.comments["FOO"] == "BAZ" assert header.comments["FOO2"] == "" def test_header_set_value_to_none(self): """ Setting the value of a card to None should simply give that card an undefined value. Undefined value should map to None. """ header = fits.Header() header["FOO"] = "BAR" assert header["FOO"] == "BAR" header["FOO"] = None assert header["FOO"] is None # Create a header that contains an undefined value and a defined # value. 
hstr = "UNDEF = \nDEFINED = 42" header = fits.Header.fromstring(hstr, sep="\n") # Explicitly add a card with an UNDEFINED value c = fits.Card("UNDEF2", fits.card.UNDEFINED) header.extend([c]) # And now assign an undefined value to the header through setitem header["UNDEF3"] = fits.card.UNDEFINED # Tuple assignment header.append(("UNDEF5", None, "Undefined value"), end=True) header.append("UNDEF6") assert header["DEFINED"] == 42 assert header["UNDEF"] is None assert header["UNDEF2"] is None assert header["UNDEF3"] is None assert header["UNDEF5"] is None assert header["UNDEF6"] is None # Assign an undefined value to a new card header["UNDEF4"] = None # Overwrite an existing value with None header["DEFINED"] = None # All headers now should be undefined for c in header.cards: assert c.value == fits.card.UNDEFINED def test_set_comment_only(self): header = fits.Header([("A", "B", "C")]) header.set("A", comment="D") assert header["A"] == "B" assert header.comments["A"] == "D" def test_header_iter(self): header = fits.Header([("A", "B"), ("C", "D")]) assert list(header) == ["A", "C"] def test_header_slice(self): header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")]) newheader = header[1:] assert len(newheader) == 2 assert "A" not in newheader assert "C" in newheader assert "E" in newheader newheader = header[::-1] assert len(newheader) == 3 assert newheader[0] == "F" assert newheader[1] == "D" assert newheader[2] == "B" newheader = header[::2] assert len(newheader) == 2 assert "A" in newheader assert "C" not in newheader assert "E" in newheader def test_header_slice_assignment(self): """ Assigning to a slice should just assign new values to the cards included in the slice. """ header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")]) # Test assigning slice to the same value; this works similarly to numpy # arrays header[1:] = 1 assert header[1] == 1 assert header[2] == 1 # Though strings are iterable they should be treated as a scalar value header[1:] = "GH" assert header[1] == "GH" assert header[2] == "GH" # Now assign via an iterable header[1:] = ["H", "I"] assert header[1] == "H" assert header[2] == "I" def test_header_slice_delete(self): """Test deleting a slice of cards from the header.""" header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")]) del header[1:] assert len(header) == 1 assert header[0] == "B" del header[:] assert len(header) == 0 def test_wildcard_slice(self): """Test selecting a subsection of a header via wildcard matching.""" header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)]) newheader = header["AB*"] assert len(newheader) == 2 assert newheader[0] == 0 assert newheader[1] == 2 def test_wildcard_with_hyphen(self): """ Regression test for issue where wildcards did not work on keywords containing hyphens. 
""" header = fits.Header([("DATE", 1), ("DATE-OBS", 2), ("DATE-FOO", 3)]) assert len(header["DATE*"]) == 3 assert len(header["DATE?*"]) == 2 assert len(header["DATE-*"]) == 2 def test_wildcard_slice_assignment(self): """Test assigning to a header slice selected via wildcard matching.""" header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)]) # Test assigning slice to the same value; this works similarly to numpy # arrays header["AB*"] = 1 assert header[0] == 1 assert header[2] == 1 # Though strings are iterable they should be treated as a scalar value header["AB*"] = "GH" assert header[0] == "GH" assert header[2] == "GH" # Now assign via an iterable header["AB*"] = ["H", "I"] assert header[0] == "H" assert header[2] == "I" def test_wildcard_slice_deletion(self): """Test deleting cards from a header that match a wildcard pattern.""" header = fits.Header([("ABC", 0), ("DEF", 1), ("ABD", 2)]) del header["AB*"] assert len(header) == 1 assert header[0] == 1 def test_header_history(self): header = fits.Header( [ ("ABC", 0), ("HISTORY", 1), ("HISTORY", 2), ("DEF", 3), ("HISTORY", 4), ("HISTORY", 5), ] ) assert header["HISTORY"] == [1, 2, 4, 5] def test_header_clear(self): header = fits.Header([("A", "B"), ("C", "D")]) header.clear() assert "A" not in header assert "C" not in header assert len(header) == 0 @pytest.mark.parametrize("fitsext", [fits.ImageHDU(), fits.CompImageHDU()]) def test_header_clear_write(self, fitsext): hdulist = fits.HDUList([fits.PrimaryHDU(), fitsext]) hdulist[1].header["FOO"] = "BAR" hdulist[1].header.clear() with pytest.raises(VerifyError) as err: hdulist.writeto(self.temp("temp.fits"), overwrite=True) err_msg = "'XTENSION' card does not exist." assert err_msg in str(err.value) def test_header_fromkeys(self): header = fits.Header.fromkeys(["A", "B"]) assert "A" in header assert header["A"] is None assert header.comments["A"] == "" assert "B" in header assert header["B"] is None assert header.comments["B"] == "" def test_header_fromkeys_with_value(self): header = fits.Header.fromkeys(["A", "B"], "C") assert "A" in header assert header["A"] == "C" assert header.comments["A"] == "" assert "B" in header assert header["B"] == "C" assert header.comments["B"] == "" def test_header_fromkeys_with_value_and_comment(self): header = fits.Header.fromkeys(["A"], ("B", "C")) assert "A" in header assert header["A"] == "B" assert header.comments["A"] == "C" def test_header_fromkeys_with_duplicates(self): header = fits.Header.fromkeys(["A", "B", "A"], "C") assert "A" in header assert ("A", 0) in header assert ("A", 1) in header assert ("A", 2) not in header assert header[0] == "C" assert header["A"] == "C" assert header[("A", 0)] == "C" assert header[2] == "C" assert header[("A", 1)] == "C" def test_header_items(self): header = fits.Header([("A", "B"), ("C", "D")]) assert list(header.items()) == [("A", "B"), ("C", "D")] def test_header_iterkeys(self): header = fits.Header([("A", "B"), ("C", "D")]) for a, b in zip(header.keys(), header): assert a == b def test_header_itervalues(self): header = fits.Header([("A", "B"), ("C", "D")]) for a, b in zip(header.values(), ["B", "D"]): assert a == b def test_header_keys(self): with fits.open(self.data("arange.fits")) as hdul: assert list(hdul[0].header) == [ "SIMPLE", "BITPIX", "NAXIS", "NAXIS1", "NAXIS2", "NAXIS3", "EXTEND", ] def test_header_list_like_pop(self): header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")]) last = header.pop() assert last == "H" assert len(header) == 3 assert list(header) == ["A", "C", "E"] mid = 
header.pop(1) assert mid == "D" assert len(header) == 2 assert list(header) == ["A", "E"] first = header.pop(0) assert first == "B" assert len(header) == 1 assert list(header) == ["E"] pytest.raises(IndexError, header.pop, 42) def test_header_dict_like_pop(self): header = fits.Header([("A", "B"), ("C", "D"), ("E", "F"), ("G", "H")]) pytest.raises(TypeError, header.pop, "A", "B", "C") last = header.pop("G") assert last == "H" assert len(header) == 3 assert list(header) == ["A", "C", "E"] mid = header.pop("C") assert mid == "D" assert len(header) == 2 assert list(header) == ["A", "E"] first = header.pop("A") assert first == "B" assert len(header) == 1 assert list(header) == ["E"] default = header.pop("X", "Y") assert default == "Y" assert len(header) == 1 pytest.raises(KeyError, header.pop, "X") def test_popitem(self): header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")]) keyword, value = header.popitem() assert keyword not in header assert len(header) == 2 keyword, value = header.popitem() assert keyword not in header assert len(header) == 1 keyword, value = header.popitem() assert keyword not in header assert len(header) == 0 pytest.raises(KeyError, header.popitem) def test_setdefault(self): header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")]) assert header.setdefault("A") == "B" assert header.setdefault("C") == "D" assert header.setdefault("E") == "F" assert len(header) == 3 assert header.setdefault("G", "H") == "H" assert len(header) == 4 assert "G" in header assert header.setdefault("G", "H") == "H" assert len(header) == 4 def test_update_from_dict(self): """ Test adding new cards and updating existing cards from a dict using Header.update() """ header = fits.Header([("A", "B"), ("C", "D")]) header.update({"A": "E", "F": "G"}) assert header["A"] == "E" assert header[0] == "E" assert "F" in header assert header["F"] == "G" assert header[-1] == "G" # Same as above but this time pass the update dict as keyword arguments header = fits.Header([("A", "B"), ("C", "D")]) header.update(A="E", F="G") assert header["A"] == "E" assert header[0] == "E" assert "F" in header assert header["F"] == "G" assert header[-1] == "G" def test_update_from_iterable(self): """ Test adding new cards and updating existing cards from an iterable of cards and card tuples. """ header = fits.Header([("A", "B"), ("C", "D")]) header.update([("A", "E"), fits.Card("F", "G")]) assert header["A"] == "E" assert header[0] == "E" assert "F" in header assert header["F"] == "G" assert header[-1] == "G" def test_header_extend(self): """ Test extending a header both with and without stripping cards from the extension header. """ hdu = fits.PrimaryHDU() hdu2 = fits.ImageHDU() hdu2.header["MYKEY"] = ("some val", "some comment") hdu.header += hdu2.header assert len(hdu.header) == 5 assert hdu.header[-1] == "some val" # Same thing, but using + instead of += hdu = fits.PrimaryHDU() hdu.header = hdu.header + hdu2.header assert len(hdu.header) == 5 assert hdu.header[-1] == "some val" # Directly append the other header in full--not usually a desirable # operation when the header is coming from another HDU hdu.header.extend(hdu2.header, strip=False) assert len(hdu.header) == 11 assert list(hdu.header)[5] == "XTENSION" assert hdu.header[-1] == "some val" assert ("MYKEY", 1) in hdu.header def test_header_extend_unique(self): """ Test extending the header with and without unique=True. 
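
        A minimal sketch of the difference, using plain headers rather than
        HDUs::

            from astropy.io import fits

            base = fits.Header([("MYKEY", "a")])
            other = fits.Header([("MYKEY", "b")])

            base.extend(other)               # default: a second MYKEY card is appended
            assert len(base) == 2

            base = fits.Header([("MYKEY", "a")])
            base.extend(other, unique=True)  # keyword already present, so skipped
            assert len(base) == 1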
""" hdu = fits.PrimaryHDU() hdu2 = fits.ImageHDU() hdu.header["MYKEY"] = ("some val", "some comment") hdu2.header["MYKEY"] = ("some other val", "some other comment") hdu.header.extend(hdu2.header) assert len(hdu.header) == 6 assert hdu.header[-2] == "some val" assert hdu.header[-1] == "some other val" hdu = fits.PrimaryHDU() hdu2 = fits.ImageHDU() hdu.header["MYKEY"] = ("some val", "some comment") hdu2.header["MYKEY"] = ("some other val", "some other comment") hdu.header.extend(hdu2.header, unique=True) assert len(hdu.header) == 5 assert hdu.header[-1] == "some val" def test_header_extend_unique_commentary(self): """ Test extending header with and without unique=True and commentary cards in the header being added. Issue astropy/astropy#3967 """ for commentary_card in ["", "COMMENT", "HISTORY"]: for is_unique in [True, False]: hdu = fits.PrimaryHDU() # Make sure we are testing the case we want. assert commentary_card not in hdu.header hdu2 = fits.ImageHDU() hdu2.header[commentary_card] = "My text" hdu.header.extend(hdu2.header, unique=is_unique) assert len(hdu.header) == 5 assert hdu.header[commentary_card][0] == "My text" def test_header_extend_update(self): """ Test extending the header with and without update=True. """ hdu = fits.PrimaryHDU() hdu2 = fits.ImageHDU() hdu.header["MYKEY"] = ("some val", "some comment") hdu.header["HISTORY"] = "history 1" hdu2.header["MYKEY"] = ("some other val", "some other comment") hdu2.header["HISTORY"] = "history 1" hdu2.header["HISTORY"] = "history 2" hdu.header.extend(hdu2.header) assert len(hdu.header) == 9 assert ("MYKEY", 0) in hdu.header assert ("MYKEY", 1) in hdu.header assert hdu.header[("MYKEY", 1)] == "some other val" assert len(hdu.header["HISTORY"]) == 3 assert hdu.header[-1] == "history 2" hdu = fits.PrimaryHDU() hdu.header["MYKEY"] = ("some val", "some comment") hdu.header["HISTORY"] = "history 1" hdu.header.extend(hdu2.header, update=True) assert len(hdu.header) == 7 assert ("MYKEY", 0) in hdu.header assert ("MYKEY", 1) not in hdu.header assert hdu.header["MYKEY"] == "some other val" assert len(hdu.header["HISTORY"]) == 2 assert hdu.header[-1] == "history 2" def test_header_extend_update_commentary(self): """ Test extending header with and without unique=True and commentary cards in the header being added. Though not quite the same as astropy/astropy#3967, update=True hits the same if statement as that issue. """ for commentary_card in ["", "COMMENT", "HISTORY"]: for is_update in [True, False]: hdu = fits.PrimaryHDU() # Make sure we are testing the case we want. assert commentary_card not in hdu.header hdu2 = fits.ImageHDU() hdu2.header[commentary_card] = "My text" hdu.header.extend(hdu2.header, update=is_update) assert len(hdu.header) == 5 assert hdu.header[commentary_card][0] == "My text" def test_header_extend_exact(self): """ Test that extending an empty header with the contents of an existing header can exactly duplicate that header, given strip=False and end=True. 
""" header = fits.getheader(self.data("test0.fits")) header2 = fits.Header() header2.extend(header, strip=False, end=True) assert header == header2 def test_header_count(self): header = fits.Header([("A", "B"), ("C", "D"), ("E", "F")]) assert header.count("A") == 1 assert header.count("C") == 1 assert header.count("E") == 1 header["HISTORY"] = "a" header["HISTORY"] = "b" assert header.count("HISTORY") == 2 pytest.raises(KeyError, header.count, "G") def test_header_append_use_blanks(self): """ Tests that blank cards can be appended, and that future appends will use blank cards when available (unless useblanks=False) """ header = fits.Header([("A", "B"), ("C", "D")]) # Append a couple blanks header.append() header.append() assert len(header) == 4 assert header[-1] == "" assert header[-2] == "" # New card should fill the first blank by default header.append(("E", "F")) assert len(header) == 4 assert header[-2] == "F" assert header[-1] == "" # This card should not use up a blank spot header.append(("G", "H"), useblanks=False) assert len(header) == 5 assert header[-1] == "" assert header[-2] == "H" def test_header_append_keyword_only(self): """ Test appending a new card with just the keyword, and no value or comment given. """ header = fits.Header([("A", "B"), ("C", "D")]) header.append("E") assert len(header) == 3 assert list(header)[-1] == "E" assert header[-1] is None assert header.comments["E"] == "" # Try appending a blank--normally this can be accomplished with just # header.append(), but header.append('') should also work (and is maybe # a little more clear) header.append("") assert len(header) == 4 assert list(header)[-1] == "" assert header[""] == "" assert header.comments[""] == "" def test_header_insert_use_blanks(self): header = fits.Header([("A", "B"), ("C", "D")]) # Append a couple blanks header.append() header.append() # Insert a new card; should use up one of the blanks header.insert(1, ("E", "F")) assert len(header) == 4 assert header[1] == "F" assert header[-1] == "" assert header[-2] == "D" # Insert a new card without using blanks header.insert(1, ("G", "H"), useblanks=False) assert len(header) == 5 assert header[1] == "H" assert header[-1] == "" def test_header_insert_before_keyword(self): """ Test that a keyword name or tuple can be used to insert new keywords. Also tests the ``after`` keyword argument. Regression test for https://github.com/spacetelescope/PyFITS/issues/12 """ header = fits.Header( [("NAXIS1", 10), ("COMMENT", "Comment 1"), ("COMMENT", "Comment 3")] ) header.insert("NAXIS1", ("NAXIS", 2, "Number of axes")) assert list(header.keys())[0] == "NAXIS" assert header[0] == 2 assert header.comments[0] == "Number of axes" header.insert("NAXIS1", ("NAXIS2", 20), after=True) assert list(header.keys())[1] == "NAXIS1" assert list(header.keys())[2] == "NAXIS2" assert header[2] == 20 header.insert(("COMMENT", 1), ("COMMENT", "Comment 2")) assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3"] header.insert(("COMMENT", 2), ("COMMENT", "Comment 4"), after=True) assert header["COMMENT"] == ["Comment 1", "Comment 2", "Comment 3", "Comment 4"] header.insert(-1, ("TEST1", True)) assert list(header.keys())[-2] == "TEST1" header.insert(-1, ("TEST2", True), after=True) assert list(header.keys())[-1] == "TEST2" assert list(header.keys())[-3] == "TEST1" def test_remove(self): header = fits.Header([("A", "B"), ("C", "D")]) # When keyword is present in the header it should be removed. 
header.remove("C") assert len(header) == 1 assert list(header) == ["A"] assert "C" not in header # When keyword is not present in the header and ignore_missing is # False, KeyError should be raised with pytest.raises(KeyError): header.remove("F") # When keyword is not present and ignore_missing is True, KeyError # will be ignored header.remove("F", ignore_missing=True) assert len(header) == 1 # Test for removing all instances of a keyword header = fits.Header([("A", "B"), ("C", "D"), ("A", "F")]) header.remove("A", remove_all=True) assert "A" not in header assert len(header) == 1 assert list(header) == ["C"] assert header[0] == "D" def test_header_comments(self): header = fits.Header([("A", "B", "C"), ("DEF", "G", "H")]) assert repr(header.comments) == " A C\n DEF H" def test_comment_slices_and_filters(self): header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")]) s = header.comments[1:] assert list(s) == ["H", "K"] s = header.comments[::-1] assert list(s) == ["K", "H", "D"] s = header.comments["A*"] assert list(s) == ["D", "K"] def test_comment_slice_filter_assign(self): header = fits.Header([("AB", "C", "D"), ("EF", "G", "H"), ("AI", "J", "K")]) header.comments[1:] = "L" assert list(header.comments) == ["D", "L", "L"] assert header.cards[header.index("AB")].comment == "D" assert header.cards[header.index("EF")].comment == "L" assert header.cards[header.index("AI")].comment == "L" header.comments[::-1] = header.comments[:] assert list(header.comments) == ["L", "L", "D"] header.comments["A*"] = ["M", "N"] assert list(header.comments) == ["M", "L", "N"] def test_commentary_slicing(self): header = fits.Header() indices = list(range(5)) for idx in indices: header["HISTORY"] = idx # Just a few sample slice types; this won't get all corner cases but if # these all work we should be in good shape assert header["HISTORY"][1:] == indices[1:] assert header["HISTORY"][:3] == indices[:3] assert header["HISTORY"][:6] == indices[:6] assert header["HISTORY"][:-2] == indices[:-2] assert header["HISTORY"][::-1] == indices[::-1] assert header["HISTORY"][1::-1] == indices[1::-1] assert header["HISTORY"][1:5:2] == indices[1:5:2] # Same tests, but copy the values first; as it turns out this is # different from just directly doing an __eq__ as in the first set of # assertions header.insert(0, ("A", "B", "C")) header.append(("D", "E", "F"), end=True) assert list(header["HISTORY"][1:]) == indices[1:] assert list(header["HISTORY"][:3]) == indices[:3] assert list(header["HISTORY"][:6]) == indices[:6] assert list(header["HISTORY"][:-2]) == indices[:-2] assert list(header["HISTORY"][::-1]) == indices[::-1] assert list(header["HISTORY"][1::-1]) == indices[1::-1] assert list(header["HISTORY"][1:5:2]) == indices[1:5:2] def test_update_commentary(self): header = fits.Header() header["FOO"] = "BAR" header["HISTORY"] = "ABC" header["FRED"] = "BARNEY" header["HISTORY"] = "DEF" header["HISTORY"] = "GHI" assert header["HISTORY"] == ["ABC", "DEF", "GHI"] # Single value update header["HISTORY"][0] = "FOO" assert header["HISTORY"] == ["FOO", "DEF", "GHI"] # Single value partial slice update header["HISTORY"][1:] = "BAR" assert header["HISTORY"] == ["FOO", "BAR", "BAR"] # Multi-value update header["HISTORY"][:] = ["BAZ", "QUX"] assert header["HISTORY"] == ["BAZ", "QUX", "BAR"] def test_commentary_comparison(self): """ Regression test for an issue found in *writing* the regression test for https://github.com/astropy/astropy/issues/2363, where comparison of the list of values for a commentary keyword did not 
always compare correctly with other iterables. """ header = fits.Header() header["HISTORY"] = "hello world" header["HISTORY"] = "hello world" header["COMMENT"] = "hello world" assert header["HISTORY"] != header["COMMENT"] header["COMMENT"] = "hello world" assert header["HISTORY"] == header["COMMENT"] def test_long_commentary_card(self): header = fits.Header() header["FOO"] = "BAR" header["BAZ"] = "QUX" longval = "ABC" * 30 header["HISTORY"] = longval header["FRED"] = "BARNEY" header["HISTORY"] = longval assert len(header) == 7 assert list(header)[2] == "FRED" assert str(header.cards[3]) == "HISTORY " + longval[:72] assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:] header.set("HISTORY", longval, after="FOO") assert len(header) == 9 assert str(header.cards[1]) == "HISTORY " + longval[:72] assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:] header = fits.Header() header.update({"FOO": "BAR"}) header.update({"BAZ": "QUX"}) longval = "ABC" * 30 header.add_history(longval) header.update({"FRED": "BARNEY"}) header.add_history(longval) assert len(header.cards) == 7 assert header.cards[2].keyword == "FRED" assert str(header.cards[3]) == "HISTORY " + longval[:72] assert str(header.cards[4]).rstrip() == "HISTORY " + longval[72:] header.add_history(longval, after="FOO") assert len(header.cards) == 9 assert str(header.cards[1]) == "HISTORY " + longval[:72] assert str(header.cards[2]).rstrip() == "HISTORY " + longval[72:] def test_totxtfile(self, home_is_temp): header_filename = self.temp("header.txt") with fits.open(self.data("test0.fits")) as hdul: hdul[0].header.totextfile(header_filename) # Check the `overwrite` flag with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): hdul[0].header.totextfile(header_filename, overwrite=False) hdul[0].header.totextfile(header_filename, overwrite=True) hdu = fits.ImageHDU() hdu.header.update({"MYKEY": "FOO"}) hdu.header.extend( hdu.header.fromtextfile(header_filename), update=True, update_first=True ) # Write the hdu out and read it back in again--it should be recognized # as a PrimaryHDU hdu.writeto(self.temp("test.fits"), output_verify="ignore") with fits.open(self.temp("test.fits")) as hdul: assert isinstance(hdul[0], fits.PrimaryHDU) hdu = fits.ImageHDU() hdu.header.update({"MYKEY": "FOO"}) hdu.header.extend( hdu.header.fromtextfile(header_filename), update=True, update_first=True, strip=False, ) assert "MYKEY" in hdu.header assert "EXTENSION" not in hdu.header assert "SIMPLE" in hdu.header hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True) with fits.open(self.temp("test.fits")) as hdul2: assert len(hdul2) == 2 assert "MYKEY" in hdul2[1].header def test_tofile(self, home_is_temp): """ Repeat test_totxtfile, but with tofile() """ header_filename = self.temp("header.fits") with fits.open(self.data("test0.fits")) as hdul: hdul[0].header.tofile(header_filename) # Check the `overwrite` flag with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): hdul[0].header.tofile(header_filename, overwrite=False) hdul[0].header.tofile(header_filename, overwrite=True) hdu = fits.ImageHDU() hdu.header.update({"MYKEY": "FOO"}) hdu.header.extend( hdu.header.fromfile(header_filename), update=True, update_first=True ) # Write the hdu out and read it back in again--it should be recognized # as a PrimaryHDU hdu.writeto(self.temp("test.fits"), output_verify="ignore") with fits.open(self.temp("test.fits")) as hdul: assert isinstance(hdul[0], fits.PrimaryHDU) hdu = fits.ImageHDU() hdu.header.update({"MYKEY": "FOO"}) 
hdu.header.extend( hdu.header.fromfile(header_filename), update=True, update_first=True, strip=False, ) assert "MYKEY" in hdu.header assert "EXTENSION" not in hdu.header assert "SIMPLE" in hdu.header hdu.writeto(self.temp("test.fits"), output_verify="ignore", overwrite=True) with fits.open(self.temp("test.fits")) as hdul2: assert len(hdul2) == 2 assert "MYKEY" in hdul2[1].header def test_fromfile(self): """Regression test for https://github.com/astropy/astropy/issues/8711""" filename = self.data("scale.fits") hdr = fits.Header.fromfile(filename) assert hdr["DATASET"] == "2MASS" def test_header_fromtextfile(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122 Manually write a text file containing some header cards ending with newlines and ensure that fromtextfile can read them back in. """ header = fits.Header() header["A"] = ("B", "C") header["B"] = ("C", "D") header["C"] = ("D", "E") with open(self.temp("test.hdr"), "w") as f: f.write("\n".join(str(c).strip() for c in header.cards)) header2 = fits.Header.fromtextfile(self.temp("test.hdr")) assert header == header2 def test_header_fromtextfile_with_end_card(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154 Make sure that when a Header is read from a text file that the END card is ignored. """ header = fits.Header([("A", "B", "C"), ("D", "E", "F")]) # We don't use header.totextfile here because it writes each card with # trailing spaces to pad them out to 80 characters. But this bug only # presents itself when each card ends immediately with a newline, and # no trailing spaces with open(self.temp("test.hdr"), "w") as f: f.write("\n".join(str(c).strip() for c in header.cards)) f.write("\nEND") new_header = fits.Header.fromtextfile(self.temp("test.hdr")) assert "END" not in new_header assert header == new_header def test_append_end_card(self): """ Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154 Manually adding an END card to a header should simply result in a ValueError (as was the case in PyFITS 3.0 and earlier). """ header = fits.Header([("A", "B", "C"), ("D", "E", "F")]) def setitem(k, v): header[k] = v pytest.raises(ValueError, setitem, "END", "") pytest.raises(ValueError, header.append, "END") pytest.raises(ValueError, header.append, "END", end=True) pytest.raises(ValueError, header.insert, len(header), "END") pytest.raises(ValueError, header.set, "END") def test_invalid_end_cards(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217 This tests the case where the END card looks like a normal card like 'END = ' and other similar oddities. As long as a card starts with END and looks like it was intended to be the END card we allow it, but with a warning. 
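
        Sketch of how such a block can be built and read back (this mirrors
        the ``invalid_header`` helper defined below; 2880 is the FITS block
        size)::

            import io
            from astropy.io import fits

            horig = fits.Header([("SIMPLE", True), ("BITPIX", 8), ("NAXIS", 0)])
            s = horig.tostring(sep="", endcard=False, padding=False)
            s += "END ="                    # bogus END card with a trailing '='
            s += " " * (-len(s) % 2880)     # pad out to a full FITS block
            # Expected to warn about the bytes trailing the END keyword while
            # still returning the parsed header.
            hdr = fits.Header.fromfile(io.BytesIO(s.encode("ascii")))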
""" horig = fits.PrimaryHDU(data=np.arange(100)).header def invalid_header(end, pad): # Build up a goofy invalid header # Start from a seemingly normal header s = horig.tostring(sep="", endcard=False, padding=False) # append the bogus end card s += end # add additional padding if requested if pad: s += " " * _pad_length(len(s)) # This will differ between Python versions if isinstance(s, bytes): return BytesIO(s) else: return StringIO(s) # Basic case motivated by the original issue; it's as if the END card # was appended by software that doesn't know to treat it specially, and # it is given an = after it s = invalid_header("END =", True) with pytest.warns( AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='" ) as w: h = fits.Header.fromfile(s) assert h == horig assert len(w) == 1 # A case similar to the last but with more spaces between END and the # =, as though the '= ' value indicator were placed like that of a # normal card s = invalid_header("END = ", True) with pytest.warns( AstropyUserWarning, match="Unexpected bytes trailing END keyword: ' ='" ) as w: h = fits.Header.fromfile(s) assert h == horig assert len(w) == 1 # END card with trailing gibberish s = invalid_header("END$%&%^*%*", True) with pytest.warns( AstropyUserWarning, match=r"Unexpected bytes trailing " r"END keyword: '\$%&%\^\*%\*'", ) as w: h = fits.Header.fromfile(s) assert h == horig assert len(w) == 1 # 'END' at the very end of a truncated file without padding; the way # the block reader works currently this can only happen if the 'END' # is at the very end of the file. s = invalid_header("END", False) with pytest.warns( AstropyUserWarning, match="Missing padding to end of the FITS block" ) as w: # Don't raise an exception on missing padding, but still produce a # warning that the END card is incomplete h = fits.Header.fromfile(s, padding=False) assert h == horig assert len(w) == 1 def test_invalid_characters(self): """ Test header with invalid characters """ # Generate invalid file with non-ASCII character h = fits.Header() h["FOO"] = "BAR" h["COMMENT"] = "hello" hdul = fits.PrimaryHDU(header=h, data=np.arange(5)) hdul.writeto(self.temp("test.fits")) with open(self.temp("test.fits"), "rb") as f: out = f.read() out = out.replace(b"hello", "héllo".encode("latin1")) out = out.replace(b"BAR", "BÀR".encode("latin1")) with open(self.temp("test2.fits"), "wb") as f2: f2.write(out) with pytest.warns( AstropyUserWarning, match="non-ASCII characters are present in the FITS file", ) as w: h = fits.getheader(self.temp("test2.fits")) assert h["FOO"] == "B?R" assert h["COMMENT"] == "h?llo" assert len(w) == 1 def test_unnecessary_move(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125 Ensures that a header is not modified when setting the position of a keyword that's already in its correct position. 
""" header = fits.Header([("A", "B"), ("B", "C"), ("C", "D")]) header.set("B", before=2) assert list(header) == ["A", "B", "C"] assert not header._modified header.set("B", after=0) assert list(header) == ["A", "B", "C"] assert not header._modified header.set("B", before="C") assert list(header) == ["A", "B", "C"] assert not header._modified header.set("B", after="A") assert list(header) == ["A", "B", "C"] assert not header._modified header.set("B", before=2) assert list(header) == ["A", "B", "C"] assert not header._modified # 123 is well past the end, and C is already at the end, so it's in the # right place already header.set("C", before=123) assert list(header) == ["A", "B", "C"] assert not header._modified header.set("C", after=123) assert list(header) == ["A", "B", "C"] assert not header._modified def test_invalid_float_cards(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137""" # Create a header containing two of the problematic cards in the test # case where this came up: hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000" h = fits.Header.fromstring(hstr, sep="\n") # First the case that *does* work prior to fixing this issue assert h["FOCALLEN"] == 155.0 assert h["APERTURE"] == 0.0 # Now if this were reserialized, would new values for these cards be # written with repaired exponent signs? with pytest.warns( fits.verify.VerifyWarning, match=r"Verification reported errors" ): assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002") assert h.cards["FOCALLEN"]._modified with pytest.warns( fits.verify.VerifyWarning, match=r"Verification reported errors" ): assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000") assert h.cards["APERTURE"]._modified assert h._modified # This is the case that was specifically causing problems; generating # the card strings *before* parsing the values. Also, the card strings # really should be "fixed" before being returned to the user h = fits.Header.fromstring(hstr, sep="\n") with pytest.warns( fits.verify.VerifyWarning, match=r"Verification reported errors" ): assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002") assert h.cards["FOCALLEN"]._modified with pytest.warns( fits.verify.VerifyWarning, match=r"Verification reported errors" ): assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000") assert h.cards["APERTURE"]._modified assert h["FOCALLEN"] == 155.0 assert h["APERTURE"] == 0.0 assert h._modified # For the heck of it, try assigning the identical values and ensure # that the newly fixed value strings are left intact h["FOCALLEN"] = 155.0 h["APERTURE"] = 0.0 assert str(h.cards["FOCALLEN"]) == _pad("FOCALLEN= +1.550000000000E+002") assert str(h.cards["APERTURE"]) == _pad("APERTURE= +0.000000000000E+000") def test_invalid_float_cards2(self, capsys): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140 """ # The example for this test requires creating a FITS file containing a # slightly misformatted float value. I can't actually even find a way # to do that directly through Astropy--it won't let me. 
hdu = fits.PrimaryHDU() hdu.header["TEST"] = 5.0022221e-07 hdu.writeto(self.temp("test.fits")) # Here we manually make the file invalid with open(self.temp("test.fits"), "rb+") as f: f.seek(346) # Location of the exponent 'E' symbol f.write(encode_ascii("e")) with fits.open(self.temp("test.fits")) as hdul, pytest.warns( AstropyUserWarning ) as w: hdul.writeto(self.temp("temp.fits"), output_verify="warn") assert len(w) == 5 # The first two warnings are just the headers to the actual warning # message (HDU 0, Card 4). I'm still not sure things like that # should be output as separate warning messages, but that's # something to think about... msg = str(w[3].message) assert "(invalid value string: '5.0022221e-07')" in msg def test_leading_zeros(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2 Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in float values like 0.001 the leading zero was unnecessarily being stripped off when rewriting the header. Though leading zeros should be removed from integer values to prevent misinterpretation as octal by python (for now Astropy will still maintain the leading zeros if now changes are made to the value, but will drop them if changes are made). """ c = fits.Card.fromstring("APERTURE= +0.000000000000E+000") assert str(c) == _pad("APERTURE= +0.000000000000E+000") assert c.value == 0.0 c = fits.Card.fromstring("APERTURE= 0.000000000000E+000") assert str(c) == _pad("APERTURE= 0.000000000000E+000") assert c.value == 0.0 c = fits.Card.fromstring("APERTURE= 017") assert str(c) == _pad("APERTURE= 017") assert c.value == 17 def test_assign_boolean(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123 Tests assigning Python and Numpy boolean values to keyword values. """ fooimg = _pad("FOO = T") barimg = _pad("BAR = F") h = fits.Header() h["FOO"] = True h["BAR"] = False assert h["FOO"] is True assert h["BAR"] is False assert str(h.cards["FOO"]) == fooimg assert str(h.cards["BAR"]) == barimg h = fits.Header() h["FOO"] = np.bool_(True) h["BAR"] = np.bool_(False) assert h["FOO"] is True assert h["BAR"] is False assert str(h.cards["FOO"]) == fooimg assert str(h.cards["BAR"]) == barimg h = fits.Header() h.append(fits.Card.fromstring(fooimg)) h.append(fits.Card.fromstring(barimg)) assert h["FOO"] is True assert h["BAR"] is False assert str(h.cards["FOO"]) == fooimg assert str(h.cards["BAR"]) == barimg def test_header_method_keyword_normalization(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149 Basically ensures that all public Header methods are case-insensitive w.r.t. keywords. Provides a reasonably comprehensive test of several methods at once. 
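
        For example (sketch)::

            from astropy.io import fits

            h = fits.Header([("abC", 1)])
            assert list(h) == ["ABC"]            # keywords are stored upper-case
            assert "abc" in h and h["ABC"] == 1  # lookups are case-insensitive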
""" h = fits.Header([("abC", 1), ("Def", 2), ("GeH", 3)]) assert list(h) == ["ABC", "DEF", "GEH"] assert "abc" in h assert "dEf" in h assert h["geh"] == 3 # Case insensitivity of wildcards assert len(h["g*"]) == 1 h["aBc"] = 2 assert h["abc"] == 2 # ABC already existed so assigning to aBc should not have added any new # cards assert len(h) == 3 del h["gEh"] assert list(h) == ["ABC", "DEF"] assert len(h) == 2 assert h.get("def") == 2 h.set("Abc", 3) assert h["ABC"] == 3 h.set("gEh", 3, before="Abc") assert list(h) == ["GEH", "ABC", "DEF"] assert h.pop("abC") == 3 assert len(h) == 2 assert h.setdefault("def", 3) == 2 assert len(h) == 2 assert h.setdefault("aBc", 1) == 1 assert len(h) == 3 assert list(h) == ["GEH", "DEF", "ABC"] h.update({"GeH": 1, "iJk": 4}) assert len(h) == 4 assert list(h) == ["GEH", "DEF", "ABC", "IJK"] assert h["GEH"] == 1 assert h.count("ijk") == 1 assert h.index("ijk") == 3 h.remove("Def") assert len(h) == 3 assert list(h) == ["GEH", "ABC", "IJK"] def test_end_in_comment(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142 Tests a case where the comment of a card ends with END, and is followed by several blank cards. """ data = np.arange(100).reshape(10, 10) hdu = fits.PrimaryHDU(data=data) hdu.header["TESTKW"] = ("Test val", "This is the END") # Add a couple blanks after the END string hdu.header.append() hdu.header.append() hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits"), memmap=False) as hdul: # memmap = False to avoid leaving open a mmap to the file when we # access the data--this causes problems on Windows when we try to # overwrite the file later assert "TESTKW" in hdul[0].header assert hdul[0].header == hdu.header assert (hdul[0].data == data).all() # Add blanks until the header is extended to two block sizes while len(hdu.header) < 36: hdu.header.append() hdu.writeto(self.temp("test.fits"), overwrite=True) with fits.open(self.temp("test.fits")) as hdul: assert "TESTKW" in hdul[0].header assert hdul[0].header == hdu.header assert (hdul[0].data == data).all() # Test parsing the same header when it's written to a text file hdu.header.totextfile(self.temp("test.hdr")) header2 = fits.Header.fromtextfile(self.temp("test.hdr")) assert hdu.header == header2 def test_assign_unicode(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134 Assigning a unicode literal as a header value should not fail silently. If the value can be converted to ASCII then it should just work. Otherwise it should fail with an appropriate value error. Also tests unicode for keywords and comments. """ erikku = "\u30a8\u30ea\u30c3\u30af" def assign(keyword, val): h[keyword] = val h = fits.Header() h["FOO"] = "BAR" assert "FOO" in h assert h["FOO"] == "BAR" assert repr(h) == _pad("FOO = 'BAR '") pytest.raises(ValueError, assign, erikku, "BAR") h["FOO"] = "BAZ" assert h["FOO"] == "BAZ" assert repr(h) == _pad("FOO = 'BAZ '") pytest.raises(ValueError, assign, "FOO", erikku) h["FOO"] = ("BAR", "BAZ") assert h["FOO"] == "BAR" assert h.comments["FOO"] == "BAZ" assert repr(h) == _pad("FOO = 'BAR ' / BAZ") pytest.raises(ValueError, assign, "FOO", ("BAR", erikku)) pytest.raises(ValueError, assign, "FOO", (erikku, "BAZ")) pytest.raises(ValueError, assign, "FOO", (erikku, erikku)) def test_assign_non_ascii(self): """ First regression test for https://github.com/spacetelescope/PyFITS/issues/37 Although test_assign_unicode ensures that `str` objects containing non-ASCII characters cannot be assigned to headers. 
It should not be possible to assign bytes to a header at all. """ h = fits.Header() with pytest.raises(ValueError, match="Illegal value: b'Hello'."): h.set("TEST", b"Hello") def test_header_strip_whitespace(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and for the solution that is optional stripping of whitespace from the end of a header value. By default extra whitespace is stripped off, but if `fits.conf.strip_header_whitespace` = False it should not be stripped. """ h = fits.Header() h["FOO"] = "Bar " assert h["FOO"] == "Bar" c = fits.Card.fromstring("QUX = 'Bar '") h.append(c) assert h["QUX"] == "Bar" assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '" assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '" with fits.conf.set_temp("strip_header_whitespace", False): assert h["FOO"] == "Bar " assert h["QUX"] == "Bar " assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '" assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '" assert h["FOO"] == "Bar" assert h["QUX"] == "Bar" assert h.cards["FOO"].image.rstrip() == "FOO = 'Bar '" assert h.cards["QUX"].image.rstrip() == "QUX = 'Bar '" def test_keep_duplicate_history_in_orig_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156 When creating a new HDU from an existing Header read from an existing FITS file, if the original header contains duplicate HISTORY values those duplicates should be preserved just as in the original header. This bug occurred due to naivete in Header.extend. """ history = [ "CCD parameters table ...", " reference table oref$n951041ko_ccd.fits", " INFLIGHT 12/07/2001 25/02/2002", " all bias frames", ] * 3 hdu = fits.PrimaryHDU() # Add the history entries twice for item in history: hdu.header["HISTORY"] = item hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul: assert hdul[0].header["HISTORY"] == history new_hdu = fits.PrimaryHDU(header=hdu.header) assert new_hdu.header["HISTORY"] == hdu.header["HISTORY"] new_hdu.writeto(self.temp("test2.fits")) with fits.open(self.temp("test2.fits")) as hdul: assert hdul[0].header["HISTORY"] == history def test_invalid_keyword_cards(self): """ Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109 Allow opening files with headers containing invalid keywords. """ # Create a header containing a few different types of BAD headers. c1 = fits.Card.fromstring("CLFIND2D: contour = 0.30") c2 = fits.Card.fromstring("Just some random text.") c3 = fits.Card.fromstring("A" * 80) hdu = fits.PrimaryHDU() # This should work with some warnings with pytest.warns(AstropyUserWarning) as w: hdu.header.append(c1) hdu.header.append(c2) hdu.header.append(c3) assert len(w) == 3 hdu.writeto(self.temp("test.fits")) with pytest.warns(AstropyUserWarning) as w: with fits.open(self.temp("test.fits")) as hdul: # Merely opening the file should blast some warnings about the # invalid keywords assert len(w) == 3 header = hdul[0].header assert "CLFIND2D" in header assert "Just som" in header assert "AAAAAAAA" in header assert header["CLFIND2D"] == ": contour = 0.30" assert header["Just som"] == "e random text." 
assert header["AAAAAAAA"] == "A" * 72 # It should not be possible to assign to the invalid keywords pytest.raises(ValueError, header.set, "CLFIND2D", "foo") pytest.raises(ValueError, header.set, "Just som", "foo") pytest.raises(ValueError, header.set, "AAAAAAAA", "foo") def test_fix_hierarch_with_invalid_value(self, capsys): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172 Ensures that when fixing a hierarch card it remains a hierarch card. """ c = fits.Card.fromstring("HIERARCH ESO DET CHIP PXSPACE = 5e6") with pytest.warns( fits.verify.VerifyWarning, match=r"Verification reported errors" ): c.verify("fix") assert str(c) == _pad("HIERARCH ESO DET CHIP PXSPACE = 5E6") def test_assign_inf_nan(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/11 For the time being it should not be possible to assign the floating point values inf or nan to a header value, since this is not defined by the FITS standard. """ h = fits.Header() pytest.raises(ValueError, h.set, "TEST", float("nan")) pytest.raises(ValueError, h.set, "TEST", np.nan) pytest.raises(ValueError, h.set, "TEST", np.float32("nan")) pytest.raises(ValueError, h.set, "TEST", float("inf")) pytest.raises(ValueError, h.set, "TEST", np.inf) def test_update_bool(self): """ Regression test for an issue where a value of True in a header cannot be updated to a value of 1, and likewise for False/0. """ h = fits.Header([("TEST", True)]) h["TEST"] = 1 assert h["TEST"] is not True assert isinstance(h["TEST"], int) assert h["TEST"] == 1 h["TEST"] = np.bool_(True) assert h["TEST"] is True h["TEST"] = False assert h["TEST"] is False h["TEST"] = np.bool_(False) assert h["TEST"] is False h["TEST"] = 0 assert h["TEST"] is not False assert isinstance(h["TEST"], int) assert h["TEST"] == 0 h["TEST"] = np.bool_(False) assert h["TEST"] is False def test_update_numeric(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/49 Ensure that numeric values can be upcast/downcast between int, float, and complex by assigning values that compare equal to the existing value but are a different type. 
""" h = fits.Header() h["TEST"] = 1 # int -> float h["TEST"] = 1.0 assert isinstance(h["TEST"], float) assert str(h).startswith("TEST = 1.0") # float -> int h["TEST"] = 1 assert isinstance(h["TEST"], int) assert str(h).startswith("TEST = 1") # int -> complex h["TEST"] = 1.0 + 0.0j assert isinstance(h["TEST"], complex) assert str(h).startswith("TEST = (1.0, 0.0)") # complex -> float h["TEST"] = 1.0 assert isinstance(h["TEST"], float) assert str(h).startswith("TEST = 1.0") # float -> complex h["TEST"] = 1.0 + 0.0j assert isinstance(h["TEST"], complex) assert str(h).startswith("TEST = (1.0, 0.0)") # complex -> int h["TEST"] = 1 assert isinstance(h["TEST"], int) assert str(h).startswith("TEST = 1") # Now the same tests but with zeros h["TEST"] = 0 # int -> float h["TEST"] = 0.0 assert isinstance(h["TEST"], float) assert str(h).startswith("TEST = 0.0") # float -> int h["TEST"] = 0 assert isinstance(h["TEST"], int) assert str(h).startswith("TEST = 0") # int -> complex h["TEST"] = 0.0 + 0.0j assert isinstance(h["TEST"], complex) assert str(h).startswith("TEST = (0.0, 0.0)") # complex -> float h["TEST"] = 0.0 assert isinstance(h["TEST"], float) assert str(h).startswith("TEST = 0.0") # float -> complex h["TEST"] = 0.0 + 0.0j assert isinstance(h["TEST"], complex) assert str(h).startswith("TEST = (0.0, 0.0)") # complex -> int h["TEST"] = 0 assert isinstance(h["TEST"], int) assert str(h).startswith("TEST = 0") def test_newlines_in_commentary(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/51 Test data extracted from a header in an actual FITS file found in the wild. Names have been changed to protect the innocent. """ # First ensure that we can't assign new keyword values with newlines in # them h = fits.Header() pytest.raises(ValueError, h.set, "HISTORY", "\n") pytest.raises(ValueError, h.set, "HISTORY", "\nabc") pytest.raises(ValueError, h.set, "HISTORY", "abc\n") pytest.raises(ValueError, h.set, "HISTORY", "abc\ndef") test_cards = [ "HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 " "HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 " "HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 " "HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif" "HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use" "HISTORY r ' fred' with fv on 2013-11-04T16:59:14 " "HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif" "HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use" "HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' " "HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv " "HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1" "HISTORY 1-04T16:59:14 " ] for card_image in test_cards: c = fits.Card.fromstring(card_image) if "\n" in card_image: pytest.raises(fits.VerifyError, c.verify, "exception") else: c.verify("exception") def test_long_commentary_card_appended_to_header(self): """ If a HISTORY or COMMENT card with a too-long value is appended to a header with Header.append (as opposed to assigning to hdr['HISTORY'] it fails verification. 
Regression test for https://github.com/astropy/astropy/issues/11486 """ header = fits.Header() value = "abc" * 90 # this is what Table does when saving its history metadata key to a # FITS file header.append(("history", value)) assert len(header.cards) == 1 # Test Card._split() directly since this was the main problem area key, val = header.cards[0]._split() assert key == "HISTORY" and val == value # Try writing adding this header to an HDU and writing it to a file hdu = fits.PrimaryHDU(header=header) hdu.writeto(self.temp("test.fits"), overwrite=True) def test_header_fromstring_bytes(self): """ Test reading a Header from a `bytes` string. See https://github.com/astropy/astropy/issues/8706 """ with open(self.data("test0.fits"), "rb") as fobj: pri_hdr_from_bytes = fits.Header.fromstring(fobj.read()) pri_hdr = fits.getheader(self.data("test0.fits")) assert pri_hdr["NAXIS"] == pri_hdr_from_bytes["NAXIS"] assert pri_hdr == pri_hdr_from_bytes assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring() def test_set_keyword_with_space(self): """ Regression test for https://github.com/astropy/astropy/issues/10479 """ hdr = fits.Header() hdr["KEY2 "] = 2 hdr["KEY2 "] = 4 assert len(hdr) == 1 assert hdr["KEY2"] == 4 assert hdr["KEY2 "] == 4 def test_strip(self): hdr = fits.getheader(self.data("tb.fits"), ext=1) hdr["FOO"] = "bar" hdr.strip() assert set(hdr) == {"HISTORY", "FOO"} hdr = fits.getheader(self.data("tb.fits"), ext=1) hdr["FOO"] = "bar" hdr = hdr.copy(strip=True) assert set(hdr) == {"HISTORY", "FOO"} def test_update_invalid_card(self): """ Regression test for https://github.com/astropy/astropy/issues/5408 Tests updating the value of a card that is malformatted (with an invalid value literal). This tests two ways of reproducing the problem, one working with a Card object directly, and one when reading/writing a header containing such an invalid card. """ card = fits.Card.fromstring("KW = INF / Comment") card.value = "FIXED" assert tuple(card) == ("KW", "FIXED", "Comment") card.verify("fix") assert tuple(card) == ("KW", "FIXED", "Comment") card = fits.Card.fromstring("KW = INF") hdu = fits.PrimaryHDU() # This is a loophole to write a header containing a malformatted card card._verified = True hdu.header.append(card) hdu.header.tofile(self.temp("bogus.fits")) with fits.open(self.temp("bogus.fits")) as hdul: hdul[0].header["KW"] = -1 hdul.writeto(self.temp("bogus_fixed.fits")) with fits.open(self.temp("bogus_fixed.fits")) as hdul: assert hdul[0].header["KW"] == -1 def test_index_numpy_int(self): header = fits.Header([("A", "FOO"), ("B", 2), ("C", "BAR")]) idx = np.int8(2) assert header[idx] == "BAR" header[idx] = "BAZ" assert header[idx] == "BAZ" header.insert(idx, ("D", 42)) assert header[idx] == 42 header.add_comment("HELLO") header.add_comment("WORLD") assert header["COMMENT"][np.int64(1)] == "WORLD" header.append(("C", "BAZBAZ")) assert header[("C", np.int16(0))] == "BAZ" assert header[("C", np.uint32(1))] == "BAZBAZ" def test_header_data_size(self): """ Tests data size calculation (w/o padding) given a Header. """ hdu = fits.PrimaryHDU() header = hdu.header assert header.data_size == 0 header["BITPIX"] = 32 header["NAXIS"] = 2 header["NAXIS1"] = 100 header["NAXIS2"] = 100 assert header.data_size == 40000 assert header.data_size_padded == 40320 class TestRecordValuedKeywordCards(FitsTestCase): """ Tests for handling of record-valued keyword cards as used by the `FITS WCS distortion paper <https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__. 
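
    A record-valued keyword card (RVKC) packs a ``field-specifier: value``
    pair into the string value of an ordinary card; for example (sketch)::

        from astropy.io import fits

        c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
        assert c.keyword == "DP1.NAXIS"   # keyword plus field-specifier
        assert c.value == 2.0             # the value is parsed as a float
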
These tests are derived primarily from the release notes for PyFITS 1.4 (in which this feature was first introduced. Note that extra leading spaces in the `value` fields should be parsed on input, but will be stripped in the cards. """ def setup_method(self): super().setup_method() self._test_header = fits.Header() self._test_header.set("DP1", "NAXIS: 2") self._test_header.set("DP1", "AXIS.1: 1") self._test_header.set("DP1", "AXIS.2: 2") self._test_header.set("DP1", "NAUX: 2") self._test_header.set("DP1", "AUX.1.COEFF.0: 0") self._test_header.set("DP1", "AUX.1.POWER.0: 1") self._test_header.set("DP1", "AUX.1.COEFF.1: 0.00048828125") self._test_header.set("DP1", "AUX.1.POWER.1: 1") def test_initialize_rvkc(self): """ Test different methods for initializing a card that should be recognized as a RVKC """ c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment") assert c.keyword == "DP1.NAXIS" assert c.value == 2.0 assert c.field_specifier == "NAXIS" assert c.comment == "A comment" c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'") assert c.keyword == "DP1.NAXIS" assert c.value == 2.1 assert c.field_specifier == "NAXIS" c = fits.Card.fromstring("DP1 = 'NAXIS: a'") assert c.keyword == "DP1" assert c.value == "NAXIS: a" assert c.field_specifier is None c = fits.Card("DP1", "NAXIS: 2") assert c.keyword == "DP1.NAXIS" assert c.value == 2.0 assert c.field_specifier == "NAXIS" c = fits.Card("DP1", "NAXIS: 2.0") assert c.keyword == "DP1.NAXIS" assert c.value == 2.0 assert c.field_specifier == "NAXIS" c = fits.Card("DP1", "NAXIS: a") assert c.keyword == "DP1" assert c.value == "NAXIS: a" assert c.field_specifier is None c = fits.Card("DP1.NAXIS", 2) assert c.keyword == "DP1.NAXIS" assert c.value == 2.0 assert c.field_specifier == "NAXIS" c = fits.Card("DP1.NAXIS", 2.0) assert c.keyword == "DP1.NAXIS" assert c.value == 2.0 assert c.field_specifier == "NAXIS" with pytest.warns(fits.verify.VerifyWarning): c = fits.Card("DP1.NAXIS", "a") assert c.keyword == "DP1.NAXIS" assert c.value == "a" assert c.field_specifier is None def test_parse_field_specifier(self): """ Tests that the field_specifier can accessed from a card read from a string before any other attributes are accessed. """ c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment") assert c.field_specifier == "NAXIS" assert c.keyword == "DP1.NAXIS" assert c.value == 2.0 assert c.comment == "A comment" def test_update_field_specifier(self): """ Test setting the field_specifier attribute and updating the card image to reflect the new value. """ c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment") assert c.field_specifier == "NAXIS" c.field_specifier = "NAXIS1" assert c.field_specifier == "NAXIS1" assert c.keyword == "DP1.NAXIS1" assert c.value == 2.0 assert c.comment == "A comment" assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment" def test_field_specifier_case_senstivity(self): """ The keyword portion of an RVKC should still be case-insensitive, but the field-specifier portion should be case-sensitive. """ header = fits.Header() header.set("abc.def", 1) header.set("abc.DEF", 2) assert header["abc.def"] == 1 assert header["ABC.def"] == 1 assert header["aBc.def"] == 1 assert header["ABC.DEF"] == 2 assert "ABC.dEf" not in header def test_get_rvkc_by_index(self): """ Returning a RVKC from a header via index lookup should return the float value of the card. 
""" assert self._test_header[0] == 2.0 assert isinstance(self._test_header[0], float) assert self._test_header[1] == 1.0 assert isinstance(self._test_header[1], float) def test_get_rvkc_by_keyword(self): """ Returning a RVKC just via the keyword name should return the full value string of the first card with that keyword. This test was changed to reflect the requirement in ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it required _test_header['DP1'] to return the parsed float value. """ assert self._test_header["DP1"] == "NAXIS: 2" def test_get_rvkc_by_keyword_and_field_specifier(self): """ Returning a RVKC via the full keyword/field-specifier combination should return the floating point value associated with the RVKC. """ assert self._test_header["DP1.NAXIS"] == 2.0 assert isinstance(self._test_header["DP1.NAXIS"], float) assert self._test_header["DP1.AUX.1.COEFF.1"] == 0.00048828125 def test_access_nonexistent_rvkc(self): """ Accessing a nonexistent RVKC should raise an IndexError for index-based lookup, or a KeyError for keyword lookup (like a normal card). """ pytest.raises(IndexError, lambda x: self._test_header[x], 8) # Test exception with message with pytest.raises(KeyError, match=r"Keyword 'DP1\.AXIS\.3' not found."): self._test_header["DP1.AXIS.3"] def test_update_rvkc(self): """A RVKC can be updated either via index or keyword access.""" self._test_header[0] = 3 assert self._test_header["DP1.NAXIS"] == 3.0 assert isinstance(self._test_header["DP1.NAXIS"], float) self._test_header["DP1.AXIS.1"] = 1.1 assert self._test_header["DP1.AXIS.1"] == 1.1 def test_update_rvkc_2(self): """Regression test for an issue that appeared after SVN r2412.""" h = fits.Header() h["D2IM1.EXTVER"] = 1 assert h["D2IM1.EXTVER"] == 1.0 h["D2IM1.EXTVER"] = 2 assert h["D2IM1.EXTVER"] == 2.0 def test_raw_keyword_value(self): c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment") assert c.rawkeyword == "DP1" assert c.rawvalue == "NAXIS: 2" c = fits.Card("DP1.NAXIS", 2) assert c.rawkeyword == "DP1" assert c.rawvalue == "NAXIS: 2.0" c = fits.Card("DP1.NAXIS", 2.0) assert c.rawkeyword == "DP1" assert c.rawvalue == "NAXIS: 2.0" def test_rvkc_insert_after(self): """ It should be possible to insert a new RVKC after an existing one specified by the full keyword/field-specifier combination.""" self._test_header.set("DP1", "AXIS.3: 1", "a comment", after="DP1.AXIS.2") assert self._test_header[3] == 1 assert self._test_header["DP1.AXIS.3"] == 1 def test_rvkc_delete(self): """ Deleting a RVKC should work as with a normal card by using the full keyword/field-spcifier combination. 
""" del self._test_header["DP1.AXIS.1"] assert len(self._test_header) == 7 assert list(self._test_header)[0] == "DP1.NAXIS" assert self._test_header[0] == 2 assert list(self._test_header)[1] == "DP1.AXIS.2" # Perform a subsequent delete to make sure all the index mappings were # updated del self._test_header["DP1.AXIS.2"] assert len(self._test_header) == 6 assert list(self._test_header)[0] == "DP1.NAXIS" assert self._test_header[0] == 2 assert list(self._test_header)[1] == "DP1.NAUX" assert self._test_header[1] == 2 def test_pattern_matching_keys(self): """Test the keyword filter strings with RVKCs.""" cl = self._test_header["DP1.AXIS.*"] assert isinstance(cl, fits.Header) assert [str(c).strip() for c in cl.cards] == [ "DP1 = 'AXIS.1: 1'", "DP1 = 'AXIS.2: 2'", ] cl = self._test_header["DP1.N*"] assert [str(c).strip() for c in cl.cards] == [ "DP1 = 'NAXIS: 2'", "DP1 = 'NAUX: 2'", ] cl = self._test_header["DP1.AUX..."] assert [str(c).strip() for c in cl.cards] == [ "DP1 = 'AUX.1.COEFF.0: 0'", "DP1 = 'AUX.1.POWER.0: 1'", "DP1 = 'AUX.1.COEFF.1: 0.00048828125'", "DP1 = 'AUX.1.POWER.1: 1'", ] cl = self._test_header["DP?.NAXIS"] assert [str(c).strip() for c in cl.cards] == ["DP1 = 'NAXIS: 2'"] cl = self._test_header["DP1.A*S.*"] assert [str(c).strip() for c in cl.cards] == [ "DP1 = 'AXIS.1: 1'", "DP1 = 'AXIS.2: 2'", ] def test_pattern_matching_key_deletion(self): """Deletion by filter strings should work.""" del self._test_header["DP1.A*..."] assert len(self._test_header) == 2 assert list(self._test_header)[0] == "DP1.NAXIS" assert self._test_header[0] == 2 assert list(self._test_header)[1] == "DP1.NAUX" assert self._test_header[1] == 2 def test_successive_pattern_matching(self): """ A card list returned via a filter string should be further filterable. """ cl = self._test_header["DP1.A*..."] assert [str(c).strip() for c in cl.cards] == [ "DP1 = 'AXIS.1: 1'", "DP1 = 'AXIS.2: 2'", "DP1 = 'AUX.1.COEFF.0: 0'", "DP1 = 'AUX.1.POWER.0: 1'", "DP1 = 'AUX.1.COEFF.1: 0.00048828125'", "DP1 = 'AUX.1.POWER.1: 1'", ] cl2 = cl["*.*AUX..."] assert [str(c).strip() for c in cl2.cards] == [ "DP1 = 'AUX.1.COEFF.0: 0'", "DP1 = 'AUX.1.POWER.0: 1'", "DP1 = 'AUX.1.COEFF.1: 0.00048828125'", "DP1 = 'AUX.1.POWER.1: 1'", ] def test_rvkc_in_cardlist_keys(self): """ The CardList.keys() method should return full keyword/field-spec values for RVKCs. """ cl = self._test_header["DP1.AXIS.*"] assert list(cl) == ["DP1.AXIS.1", "DP1.AXIS.2"] def test_rvkc_in_cardlist_values(self): """ The CardList.values() method should return the values of all RVKCs as floating point values. """ cl = self._test_header["DP1.AXIS.*"] assert list(cl.values()) == [1.0, 2.0] def test_rvkc_value_attribute(self): """ Individual card values should be accessible by the .value attribute (which should return a float). """ cl = self._test_header["DP1.AXIS.*"] assert cl.cards[0].value == 1.0 assert isinstance(cl.cards[0].value, float) def test_overly_permissive_parsing(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183 Ensures that cards with standard commentary keywords are never treated as RVKCs. Also ensures that cards not strictly matching the RVKC pattern are not treated as such. 
""" h = fits.Header() h["HISTORY"] = "AXIS.1: 2" h["HISTORY"] = "AXIS.2: 2" assert "HISTORY.AXIS" not in h assert "HISTORY.AXIS.1" not in h assert "HISTORY.AXIS.2" not in h assert h["HISTORY"] == ["AXIS.1: 2", "AXIS.2: 2"] # This is an example straight out of the ticket where everything after # the '2012' in the date value was being ignored, allowing the value to # successfully be parsed as a "float" h = fits.Header() h["HISTORY"] = "Date: 2012-09-19T13:58:53.756061" assert "HISTORY.Date" not in h assert str(h.cards[0]) == _pad("HISTORY Date: 2012-09-19T13:58:53.756061") c = fits.Card.fromstring(" 'Date: 2012-09-19T13:58:53.756061'") assert c.keyword == "" assert c.value == "'Date: 2012-09-19T13:58:53.756061'" assert c.field_specifier is None h = fits.Header() h["FOO"] = "Date: 2012-09-19T13:58:53.756061" assert "FOO.Date" not in h assert str(h.cards[0]) == _pad("FOO = 'Date: 2012-09-19T13:58:53.756061'") def test_overly_aggressive_rvkc_lookup(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184 Ensures that looking up a RVKC by keyword only (without the field-specifier) in a header returns the full string value of that card without parsing it as a RVKC. Also ensures that a full field-specifier is required to match a RVKC--a partial field-specifier that doesn't explicitly match any record-valued keyword should result in a KeyError. """ c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'") c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'") h = fits.Header([c1, c2]) assert h["FOO"] == "AXIS.1: 2" assert h[("FOO", 1)] == "AXIS.2: 4" assert h["FOO.AXIS.1"] == 2.0 assert h["FOO.AXIS.2"] == 4.0 assert "FOO.AXIS" not in h assert "FOO.AXIS." not in h assert "FOO." not in h pytest.raises(KeyError, lambda: h["FOO.AXIS"]) pytest.raises(KeyError, lambda: h["FOO.AXIS."]) pytest.raises(KeyError, lambda: h["FOO."]) def test_fitsheader_script(self): """Tests the basic functionality of the `fitsheader` script.""" from astropy.io.fits.scripts import fitsheader # Can an extension by specified by the EXTNAME keyword? hf = fitsheader.HeaderFormatter(self.data("zerowidth.fits")) output = hf.parse(extensions=["AIPS FQ"]) assert "EXTNAME = 'AIPS FQ" in output assert "BITPIX" in output # Can we limit the display to one specific keyword? output = hf.parse(extensions=["AIPS FQ"], keywords=["EXTNAME"]) assert "EXTNAME = 'AIPS FQ" in output assert "BITPIX =" not in output assert len(output.split("\n")) == 3 # Can we limit the display to two specific keywords? output = hf.parse(extensions=[1], keywords=["EXTNAME", "BITPIX"]) assert "EXTNAME =" in output assert "BITPIX =" in output assert len(output.split("\n")) == 4 # Can we use wildcards for keywords? output = hf.parse(extensions=[1], keywords=["NAXIS*"]) assert "NAXIS =" in output assert "NAXIS1 =" in output assert "NAXIS2 =" in output hf.close() # Can an extension by specified by the EXTNAME+EXTVER keywords? hf = fitsheader.HeaderFormatter(self.data("test0.fits")) assert "EXTNAME = 'SCI" in hf.parse(extensions=["SCI,2"]) hf.close() # Can we print the original header before decompression? 
hf = fitsheader.HeaderFormatter(self.data("comp.fits")) assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1], compressed=False) assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1], compressed=True) hf.close() def test_fitsheader_compressed_from_primary_image_ext(self): """Regression test for issue https://github.com/astropy/astropy/issues/7312""" data = np.arange(2 * 2, dtype=np.int8).reshape((2, 2)) phdu = fits.PrimaryHDU(data=data) chdu = fits.CompImageHDU(data=phdu.data, header=phdu.header) chdu.writeto(self.temp("tmp2.fits"), overwrite=True) with fits.open(self.temp("tmp2.fits")) as hdul: assert "XTENSION" not in hdul[1].header assert "PCOUNT" not in hdul[1].header assert "GCOUNT" not in hdul[1].header def test_fitsheader_table_feature(self): """Tests the `--table` feature of the `fitsheader` script.""" from astropy.io import fits from astropy.io.fits.scripts import fitsheader test_filename = self.data("zerowidth.fits") formatter = fitsheader.TableHeaderFormatter(test_filename) with fits.open(test_filename) as fitsobj: # Does the table contain the expected number of rows? mytable = formatter.parse([0]) assert len(mytable) == len(fitsobj[0].header) # Repeat the above test when multiple HDUs are requested mytable = formatter.parse(extensions=["AIPS FQ", 2, "4"]) assert len(mytable) == ( len(fitsobj["AIPS FQ"].header) + len(fitsobj[2].header) + len(fitsobj[4].header) ) # Can we recover the filename and extension name from the table? mytable = formatter.parse(extensions=["AIPS FQ"]) assert np.all(mytable["filename"] == test_filename) assert np.all(mytable["hdu"] == "AIPS FQ") assert mytable["value"][mytable["keyword"] == "EXTNAME"] == "AIPS FQ" # Can we specify a single extension/keyword? mytable = formatter.parse(extensions=["AIPS FQ"], keywords=["EXTNAME"]) assert len(mytable) == 1 assert mytable["hdu"][0] == "AIPS FQ" assert mytable["keyword"][0] == "EXTNAME" assert mytable["value"][0] == "AIPS FQ" # Is an incorrect extension dealt with gracefully? mytable = formatter.parse(extensions=["DOES_NOT_EXIST"]) assert mytable is None # Is an incorrect keyword dealt with gracefully? mytable = formatter.parse(extensions=["AIPS FQ"], keywords=["DOES_NOT_EXIST"]) assert mytable is None formatter.close() @pytest.mark.parametrize("mode", ["wb", "wb+", "ab", "ab+"]) def test_hdu_writeto_mode(self, mode): with open(self.temp("mode.fits"), mode=mode) as ff: hdu = fits.ImageHDU(data=np.ones(5)) hdu.writeto(ff) def test_subclass(): """Check that subclasses don't get ignored on slicing and copying.""" class MyHeader(fits.Header): def append(self, card, *args, **kwargs): if isinstance(card, tuple) and len(card) == 2: # Just for our checks we add a comment if there is none. card += ("no comment",) return super().append(card, *args, **kwargs) my_header = MyHeader( ( ("a", 1.0, "first"), ("b", 2.0, "second"), ( "c", 3.0, ), ) ) assert my_header.comments["a"] == "first" assert my_header.comments["b"] == "second" assert my_header.comments["c"] == "no comment" slice_ = my_header[1:] assert type(slice_) is MyHeader assert slice_.comments["b"] == "second" assert slice_.comments["c"] == "no comment" selection = my_header["c*"] assert type(selection) is MyHeader assert selection.comments["c"] == "no comment" copy_ = my_header.copy() assert type(copy_) is MyHeader assert copy_.comments["b"] == "second" assert copy_.comments["c"] == "no comment" my_header.extend((("d", 4.0),)) assert my_header.comments["d"] == "no comment"
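# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above, and not collected by
# pytest): a minimal example of the record-valued keyword card (RVKC)
# behaviour exercised by the tests in this module, seen through the public
# `astropy.io.fits.Header` API.  The keyword names ("DP1", "NAXIS", "AXIS.1")
# simply mirror the fixtures used above.
def _rvkc_usage_sketch():
    from astropy.io import fits  # local import keeps the sketch self-contained

    header = fits.Header()
    # Setting a value string of the form "<field-specifier>: <float>" creates
    # a record-valued keyword card rather than a plain string card.
    header.set("DP1", "NAXIS: 2")
    header.set("DP1", "AXIS.1: 1")

    # The parsed float is reachable via "<keyword>.<field-specifier>" ...
    assert header["DP1.NAXIS"] == 2.0
    # ... while plain keyword access returns the raw value string of the
    # first matching card.
    assert header["DP1"] == "NAXIS: 2"

    # Pattern matching returns a new Header containing the matching cards.
    axes = header["DP1.AXIS.*"]
    assert list(axes) == ["DP1.AXIS.1"]
    return header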
7592310cab65924e6591edd86dedeb3196f79d2312e79a6d93003f2f3e3b611f
# Licensed under a 3-clause BSD style license - see PYFITS.rst

import numpy as np
import pytest

from astropy.io import fits
from astropy.io.fits._tiled_compression import compress_hdu

from .conftest import FitsTestCase

MAX_INT = np.iinfo(np.intc).max
MAX_LONG = np.iinfo(int).max
MAX_LONGLONG = np.iinfo(np.longlong).max


class TestCompressionFunction(FitsTestCase):
    def test_wrong_argument_number(self):
        with pytest.raises(TypeError):
            compress_hdu(1, 2)

    def test_unknown_compression_type(self):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header["ZCMPTYPE"] = "fun"
        with pytest.raises(ValueError) as exc:
            compress_hdu(hdu)
        assert "Unrecognized compression type: fun" in str(exc.value)

    def test_zbitpix_unknown(self):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header["ZBITPIX"] = 13
        with pytest.raises(ValueError) as exc:
            compress_hdu(hdu)
        assert "Invalid value for BITPIX: 13" in str(exc.value)

    def test_data_none(self):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu.data = None
        with pytest.raises(TypeError) as exc:
            compress_hdu(hdu)
        assert "CompImageHDU.data must be a numpy.ndarray" in str(exc.value)

    def test_missing_internal_header(self):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        del hdu._header
        with pytest.raises(AttributeError) as exc:
            compress_hdu(hdu)
        assert "_header" in str(exc.value)

    def test_invalid_tform(self):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header["TFORM1"] = "TX"
        with pytest.raises(RuntimeError) as exc:
            compress_hdu(hdu)
        assert "TX" in str(exc.value) and "TFORM" in str(exc.value)

    def test_invalid_zdither(self):
        hdu = fits.CompImageHDU(np.ones((10, 10)), quantize_method=1)
        hdu._header["ZDITHER0"] = "a"
        with pytest.raises(TypeError):
            compress_hdu(hdu)

    @pytest.mark.parametrize("kw", ["ZNAXIS", "ZBITPIX"])
    def test_header_missing_keyword(self, kw):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        del hdu._header[kw]
        with pytest.raises(KeyError) as exc:
            compress_hdu(hdu)
        assert kw in str(exc.value)

    @pytest.mark.parametrize("kw", ["ZNAXIS", "ZVAL1", "ZVAL2", "ZBLANK", "BLANK"])
    def test_header_value_int_overflow(self, kw):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header[kw] = MAX_INT + 1
        with pytest.raises(OverflowError):
            compress_hdu(hdu)

    @pytest.mark.parametrize("kw", ["ZTILE1", "ZNAXIS1"])
    def test_header_value_long_overflow(self, kw):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header[kw] = MAX_LONG + 1
        with pytest.raises(OverflowError):
            compress_hdu(hdu)

    @pytest.mark.parametrize("kw", ["NAXIS1", "NAXIS2", "TNULL1", "PCOUNT", "THEAP"])
    def test_header_value_longlong_overflow(self, kw):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header[kw] = MAX_LONGLONG + 1
        with pytest.raises(OverflowError):
            compress_hdu(hdu)

    @pytest.mark.parametrize("kw", ["ZVAL3"])
    def test_header_value_float_overflow(self, kw):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header[kw] = 1e300
        with pytest.raises(OverflowError):
            compress_hdu(hdu)

    @pytest.mark.parametrize("kw", ["NAXIS1", "NAXIS2", "TFIELDS", "PCOUNT"])
    def test_header_value_negative(self, kw):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header[kw] = -1
        with pytest.raises(ValueError) as exc:
            compress_hdu(hdu)
        assert f"{kw} should not be negative." in str(exc.value)

    @pytest.mark.parametrize(("kw", "limit"), [("ZNAXIS", 999), ("TFIELDS", 999)])
    def test_header_value_exceeds_custom_limit(self, kw, limit):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header[kw] = limit + 1
        with pytest.raises(ValueError) as exc:
            compress_hdu(hdu)
        assert kw in str(exc.value)

    @pytest.mark.parametrize(
        "kw", ["TTYPE1", "TFORM1", "ZCMPTYPE", "ZNAME1", "ZQUANTIZ"]
    )
    def test_header_value_no_string(self, kw):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header[kw] = 1
        with pytest.raises(TypeError):
            compress_hdu(hdu)

    @pytest.mark.parametrize("kw", ["TZERO1", "TSCAL1"])
    def test_header_value_no_double(self, kw):
        hdu = fits.CompImageHDU(np.ones((10, 10)))
        hdu._header[kw] = "1"
        with pytest.raises(TypeError):
            compress_hdu(hdu)

    @pytest.mark.parametrize("kw", ["ZSCALE", "ZZERO"])
    def test_header_value_no_double_int_image(self, kw):
        hdu = fits.CompImageHDU(np.ones((10, 10), dtype=np.int32))
        hdu._header[kw] = "1"
        with pytest.raises(TypeError):
            compress_hdu(hdu)
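# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above, and not collected by
# pytest): the tests in this module drive the private `compress_hdu` helper
# directly with deliberately broken headers.  The corresponding "happy path"
# through the public API is the round trip below; the file name, array shape
# and dtype are arbitrary example choices.
def _compressed_round_trip_sketch(tmp_dir="."):
    import os

    import numpy as np

    from astropy.io import fits

    data = np.arange(100, dtype=np.int32).reshape((10, 10))
    path = os.path.join(tmp_dir, "compressed_sketch.fits")

    # CompImageHDU stores the image as a tile-compressed binary table; the
    # default compression type is RICE_1, which is lossless for integer data.
    fits.CompImageHDU(data).writeto(path, overwrite=True)

    with fits.open(path) as hdul:
        # Index 1: writing an extension HDU prepends an (empty) primary HDU.
        restored = hdul[1].data
        assert np.array_equal(restored, data)
    return path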
f53e26b7259705fdb110c8c1e77db129f80632222b26d7e77ace3e50cccd8095
import gc import warnings import numpy as np import pytest from numpy.testing import assert_allclose, assert_array_equal from astropy import units as u from astropy.io import fits from astropy.io.fits import BinTableHDU, HDUList, ImageHDU, PrimaryHDU, table_to_hdu from astropy.io.fits.column import ( _fortran_to_python_format, _parse_tdisp_format, python_to_tdisp, ) from astropy.io.tests.mixin_columns import compare_attrs, mixin_cols, serialized_names from astropy.table import Column, QTable, Table from astropy.table.table_helpers import simple_table from astropy.time import Time from astropy.units import allclose as quantity_allclose from astropy.units.format.fits import UnitScaleError from astropy.units.quantity import QuantityInfo from astropy.utils.compat import NUMPY_LT_1_22 from astropy.utils.data import get_pkg_data_filename from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH # FITS does not preserve precision, in_subfmt, and out_subfmt. time_attrs = ["value", "shape", "format", "scale", "location"] compare_attrs = { name: (time_attrs if isinstance(col, Time) else compare_attrs[name]) for name, col in mixin_cols.items() } # FITS does not support multi-element location, array with object dtype, # or logarithmic quantities. unsupported_cols = { name: col for name, col in mixin_cols.items() if ( isinstance(col, Time) and col.location.shape != () or isinstance(col, np.ndarray) and col.dtype.kind == "O" or isinstance(col, u.LogQuantity) ) } mixin_cols = { name: col for name, col in mixin_cols.items() if name not in unsupported_cols } def equal_data(a, b): return all(np.all(a[name] == b[name]) for name in a.dtype.names) class TestSingleTable: def setup_class(self): self.data = np.array( list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])), dtype=[("a", int), ("b", "U1"), ("c", float)], ) def test_simple(self, tmp_path): filename = tmp_path / "test_simple.fts" t1 = Table(self.data) t1.write(filename, overwrite=True) t2 = Table.read(filename) assert equal_data(t1, t2) def test_simple_pathlib(self, tmp_path): filename = tmp_path / "test_simple.fit" t1 = Table(self.data) t1.write(filename, overwrite=True) t2 = Table.read(filename) assert equal_data(t1, t2) def test_simple_meta(self, tmp_path): filename = tmp_path / "test_simple.fits" t1 = Table(self.data) t1.meta["A"] = 1 t1.meta["B"] = 2.3 t1.meta["C"] = "spam" t1.meta["comments"] = ["this", "is", "a", "long", "comment"] t1.meta["HISTORY"] = ["first", "second", "third"] t1.write(filename, overwrite=True) t2 = Table.read(filename) assert equal_data(t1, t2) for key in t1.meta: if isinstance(t1.meta, list): for i in range(len(t1.meta[key])): assert t1.meta[key][i] == t2.meta[key][i] else: assert t1.meta[key] == t2.meta[key] def test_simple_meta_conflicting(self, tmp_path): filename = tmp_path / "test_simple.fits" t1 = Table(self.data) t1.meta["ttype1"] = "spam" with pytest.warns( AstropyUserWarning, match=( "Meta-data keyword ttype1 " "will be ignored since it conflicts with a FITS " "reserved keyword" ), ) as w: t1.write(filename, overwrite=True) assert len(w) == 1 def test_simple_noextension(self, tmp_path): """ Test that file type is recognized without extension """ filename = tmp_path / "test_simple" t1 = Table(self.data) t1.write(filename, overwrite=True, format="fits") t2 = Table.read(filename) assert equal_data(t1, t2) @pytest.mark.parametrize("table_type", (Table, QTable)) def test_with_units(self, table_type, tmp_path): filename = 
tmp_path / "test_with_units.fits" t1 = table_type(self.data) t1["a"].unit = u.m t1["c"].unit = u.km / u.s t1.write(filename, overwrite=True) t2 = table_type.read(filename) assert equal_data(t1, t2) assert t2["a"].unit == u.m assert t2["c"].unit == u.km / u.s def test_with_custom_units_qtable(self, tmp_path): # Test only for QTable - for Table's Column, new units are dropped # (as is checked in test_write_drop_nonstandard_units). filename = tmp_path / "test_with_units.fits" unit = u.def_unit("bandpass_sol_lum") t = QTable() t["l"] = np.ones(5) * unit with pytest.warns(AstropyUserWarning) as w: t.write(filename, overwrite=True) assert len(w) == 1 assert "bandpass_sol_lum" in str(w[0].message) # Just reading back, the data is fine but the unit is not recognized. with pytest.warns( u.UnitsWarning, match="'bandpass_sol_lum' did not parse" ) as w: t2 = QTable.read(filename) assert len(w) == 1 assert isinstance(t2["l"].unit, u.UnrecognizedUnit) assert str(t2["l"].unit) == "bandpass_sol_lum" assert np.all(t2["l"].value == t["l"].value) # But if we enable the unit, it should be recognized. with u.add_enabled_units(unit): t3 = QTable.read(filename) assert t3["l"].unit is unit assert equal_data(t3, t) # Regression check for #8897; write used to fail when a custom # unit was enabled. with pytest.warns(AstropyUserWarning): t3.write(filename, overwrite=True) # It should also be possible to read the file in using a unit alias, # even to a unit that may not be the same. with u.set_enabled_aliases({"bandpass_sol_lum": u.Lsun}): t3 = QTable.read(filename) assert t3["l"].unit is u.Lsun @pytest.mark.parametrize("table_type", (Table, QTable)) def test_read_with_unit_aliases(self, table_type): hdu = BinTableHDU(self.data) hdu.columns[0].unit = "Angstroms" hdu.columns[2].unit = "ergs/(cm.s.Angstroms)" with u.set_enabled_aliases(dict(Angstroms=u.AA, ergs=u.erg)): t = table_type.read(hdu) assert t["a"].unit == u.AA assert t["c"].unit == u.erg / (u.cm * u.s * u.AA) @pytest.mark.parametrize("table_type", (Table, QTable)) def test_with_format(self, table_type, tmp_path): filename = tmp_path / "test_with_format.fits" t1 = table_type(self.data) t1["a"].format = "{:5d}" t1["b"].format = "{:>20}" t1["c"].format = "{:6.2f}" t1.write(filename, overwrite=True) t2 = table_type.read(filename) assert equal_data(t1, t2) assert t2["a"].format == "{:5d}" assert t2["b"].format == "{:>20}" assert t2["c"].format == "{:6.2f}" def test_masked(self, tmp_path): filename = tmp_path / "test_masked.fits" t1 = Table(self.data, masked=True) t1.mask["a"] = [1, 0, 1, 0] t1.mask["b"] = [1, 0, 0, 1] t1.mask["c"] = [0, 1, 1, 0] t1.write(filename, overwrite=True) t2 = Table.read(filename) assert equal_data(t1, t2) assert np.all(t1["a"].mask == t2["a"].mask) assert np.all(t1["b"].mask == t2["b"].mask) assert np.all(t1["c"].mask == t2["c"].mask) @pytest.mark.parametrize("masked", [True, False]) def test_masked_nan(self, masked, tmp_path): """Check that masked values by default are replaced by NaN. This should work for any shape and be independent of whether the Table is formally masked or not. 
""" filename = tmp_path / "test_masked_nan.fits" a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0]) b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1], dtype="f4") c = np.ma.stack([a, b], axis=-1) t1 = Table([a, b, c], names=["a", "b", "c"], masked=masked) t1.write(filename, overwrite=True) t2 = Table.read(filename) assert_array_equal(t2["a"].data, [np.nan, 8.5, np.nan, 6.25]) assert_array_equal(t2["b"].data, [np.nan, 4.5, 6.75, np.nan]) assert_array_equal( t2["c"].data, np.stack([t2["a"].data, t2["b"].data], axis=-1) ) assert np.all(t1["a"].mask == t2["a"].mask) assert np.all(t1["b"].mask == t2["b"].mask) assert np.all(t1["c"].mask == t2["c"].mask) def test_masked_serialize_data_mask(self, tmp_path): filename = tmp_path / "test_masked_nan.fits" a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0]) b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1]) c = np.ma.stack([a, b], axis=-1) t1 = Table([a, b, c], names=["a", "b", "c"]) t1.write(filename, overwrite=True) t2 = Table.read(filename) assert_array_equal(t2["a"].data, [5.25, 8.5, 3.75, 6.25]) assert_array_equal(t2["b"].data, [2.5, 4.5, 6.75, 8.875]) assert_array_equal( t2["c"].data, np.stack([t2["a"].data, t2["b"].data], axis=-1) ) assert np.all(t1["a"].mask == t2["a"].mask) assert np.all(t1["b"].mask == t2["b"].mask) assert np.all(t1["c"].mask == t2["c"].mask) def test_read_from_fileobj(self, tmp_path): filename = tmp_path / "test_read_from_fileobj.fits" hdu = BinTableHDU(self.data) hdu.writeto(filename, overwrite=True) with open(filename, "rb") as f: t = Table.read(f) assert equal_data(t, self.data) def test_read_with_nonstandard_units(self): hdu = BinTableHDU(self.data) hdu.columns[0].unit = "RADIANS" hdu.columns[1].unit = "spam" hdu.columns[2].unit = "millieggs" with pytest.warns(u.UnitsWarning, match="did not parse as fits unit"): t = Table.read(hdu) assert equal_data(t, self.data) @pytest.mark.parametrize("table_type", (Table, QTable)) def test_write_drop_nonstandard_units(self, table_type, tmp_path): # While we are generous on input (see above), we are strict on # output, dropping units not recognized by the fits standard. filename = tmp_path / "test_nonstandard_units.fits" spam = u.def_unit("spam") t = table_type() t["a"] = [1.0, 2.0, 3.0] * spam with pytest.warns(AstropyUserWarning, match="spam") as w: t.write(filename) assert len(w) == 1 if table_type is Table: assert "cannot be recovered in reading. 
" in str(w[0].message) else: assert "lost to non-astropy fits readers" in str(w[0].message) with fits.open(filename) as ff: hdu = ff[1] assert "TUNIT1" not in hdu.header def test_memmap(self, tmp_path): filename = tmp_path / "test_simple.fts" t1 = Table(self.data) t1.write(filename, overwrite=True) t2 = Table.read(filename, memmap=False) t3 = Table.read(filename, memmap=True) assert equal_data(t2, t3) # To avoid issues with --open-files, we need to remove references to # data that uses memory mapping and force the garbage collection del t1, t2, t3 gc.collect() @pytest.mark.parametrize("memmap", (False, True)) def test_character_as_bytes(self, tmp_path, memmap): filename = tmp_path / "test_simple.fts" t1 = Table(self.data) t1.write(filename, overwrite=True) t2 = Table.read(filename, character_as_bytes=False, memmap=memmap) t3 = Table.read(filename, character_as_bytes=True, memmap=memmap) assert t2["b"].dtype.kind == "U" assert t3["b"].dtype.kind == "S" assert equal_data(t2, t3) # To avoid issues with --open-files, we need to remove references to # data that uses memory mapping and force the garbage collection del t1, t2, t3 gc.collect() def test_oned_single_element(self, tmp_path): filename = tmp_path / "test_oned_single_element.fits" table = Table({"x": [[1], [2]]}) table.write(filename, overwrite=True) read = Table.read(filename) assert read["x"].shape == (2, 1) assert len(read["x"][0]) == 1 def test_write_append(self, tmp_path): t = Table(self.data) hdu = table_to_hdu(t) def check_equal(filename, expected, start_from=1): with fits.open(filename) as hdu_list: assert len(hdu_list) == expected for hdu_table in hdu_list[start_from:]: assert hdu_table.header == hdu.header assert np.all(hdu_table.data == hdu.data) filename = tmp_path / "test_write_append.fits" t.write(filename, append=True) t.write(filename, append=True) check_equal(filename, 3) # Check the overwrite works correctly. t.write(filename, append=True, overwrite=True) t.write(filename, append=True) check_equal(filename, 3) # Normal write, check it's not appending. t.write(filename, overwrite=True) t.write(filename, overwrite=True) check_equal(filename, 2) # Now write followed by append, with different shaped tables. 
t2 = Table(np.array([1, 2])) t2.write(filename, overwrite=True) t.write(filename, append=True) check_equal(filename, 3, start_from=2) assert equal_data(t2, Table.read(filename, hdu=1)) def test_write_overwrite(self, tmp_path): t = Table(self.data) filename = tmp_path / "test_write_overwrite.fits" t.write(filename) with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): t.write(filename) t.write(filename, overwrite=True) def test_mask_nans_on_read(self, tmp_path): filename = tmp_path / "test_inexact_format_parse_on_read.fits" c1 = fits.Column(name="a", array=np.array([1, 2, np.nan]), format="E") table_hdu = fits.TableHDU.from_columns([c1]) table_hdu.writeto(filename) tab = Table.read(filename) assert any(tab.mask) assert tab.mask[2] tab = Table.read(filename, mask_invalid=False) assert tab.mask is None # using memmap also deactivate the masking tab = Table.read(filename, memmap=True) assert tab.mask is None def test_mask_null_on_read(self, tmp_path): filename = tmp_path / "test_null_format_parse_on_read.fits" col = fits.Column( name="a", array=np.array([1, 2, 99, 60000], dtype="u2"), format="I", null=99, bzero=32768, ) bin_table_hdu = fits.BinTableHDU.from_columns([col]) bin_table_hdu.writeto(filename, overwrite=True) tab = Table.read(filename) assert any(tab.mask) assert tab.mask[2] def test_mask_str_on_read(self, tmp_path): filename = tmp_path / "test_null_format_parse_on_read.fits" col = fits.Column( name="a", array=np.array([b"foo", b"bar", b""], dtype="|S3"), format="A3" ) bin_table_hdu = fits.BinTableHDU.from_columns([col]) bin_table_hdu.writeto(filename, overwrite=True) tab = Table.read(filename) assert any(tab.mask) assert tab.mask[2] tab = Table.read(filename, mask_invalid=False) assert tab.mask is None class TestMultipleHDU: def setup_class(self): self.data1 = np.array( list(zip([1, 2, 3, 4], ["a", "b", "c", "d"], [2.3, 4.5, 6.7, 8.9])), dtype=[("a", int), ("b", "U1"), ("c", float)], ) self.data2 = np.array( list(zip([1.4, 2.3, 3.2, 4.7], [2.3, 4.5, 6.7, 8.9])), dtype=[("p", float), ("q", float)], ) self.data3 = np.array( list(zip([1, 2, 3, 4], [2.3, 4.5, 6.7, 8.9])), dtype=[("A", int), ("B", float)], ) hdu0 = PrimaryHDU() hdu1 = BinTableHDU(self.data1, name="first") hdu2 = BinTableHDU(self.data2, name="second") hdu3 = ImageHDU(np.ones((3, 3)), name="third") hdu4 = BinTableHDU(self.data3) self.hdus = HDUList([hdu0, hdu1, hdu2, hdu3, hdu4]) self.hdusb = HDUList([hdu0, hdu3, hdu2, hdu1]) self.hdus3 = HDUList([hdu0, hdu3, hdu2]) self.hdus2 = HDUList([hdu0, hdu1, hdu3]) self.hdus1 = HDUList([hdu0, hdu1]) def teardown_class(self): del self.hdus def setup_method(self, method): warnings.filterwarnings("always") def test_read(self, tmp_path): filename = tmp_path / "test_read.fits" self.hdus.writeto(filename) with pytest.warns( AstropyUserWarning, match=r"hdu= was not specified but multiple tables " r"are present, reading in first available " r"table \(hdu=1\)", ): t = Table.read(filename) assert equal_data(t, self.data1) filename = tmp_path / "test_read_2.fits" self.hdusb.writeto(filename) with pytest.warns( AstropyUserWarning, match=r"hdu= was not specified but multiple tables " r"are present, reading in first available " r"table \(hdu=2\)", ): t3 = Table.read(filename) assert equal_data(t3, self.data2) def test_read_with_hdu_0(self, tmp_path): filename = tmp_path / "test_read_with_hdu_0.fits" self.hdus.writeto(filename) with pytest.raises(ValueError) as exc: Table.read(filename, hdu=0) assert exc.value.args[0] == "No table found in hdu=0" @pytest.mark.parametrize("hdu", [1, 
"first"]) def test_read_with_hdu_1(self, tmp_path, hdu): filename = tmp_path / "test_read_with_hdu_1.fits" self.hdus.writeto(filename) t = Table.read(filename, hdu=hdu) assert equal_data(t, self.data1) @pytest.mark.parametrize("hdu", [2, "second"]) def test_read_with_hdu_2(self, tmp_path, hdu): filename = tmp_path / "test_read_with_hdu_2.fits" self.hdus.writeto(filename) t = Table.read(filename, hdu=hdu) assert equal_data(t, self.data2) @pytest.mark.parametrize("hdu", [3, "third"]) def test_read_with_hdu_3(self, tmp_path, hdu): filename = tmp_path / "test_read_with_hdu_3.fits" self.hdus.writeto(filename) with pytest.raises(ValueError, match="No table found in hdu=3"): Table.read(filename, hdu=hdu) def test_read_with_hdu_4(self, tmp_path): filename = tmp_path / "test_read_with_hdu_4.fits" self.hdus.writeto(filename) t = Table.read(filename, hdu=4) assert equal_data(t, self.data3) @pytest.mark.parametrize("hdu", [2, 3, "1", "second", ""]) def test_read_with_hdu_missing(self, tmp_path, hdu): filename = tmp_path / "test_warn_with_hdu_1.fits" self.hdus1.writeto(filename) with pytest.warns( AstropyDeprecationWarning, match=rf"Specified hdu={hdu} not found, " r"reading in first available table \(hdu=1\)", ): t1 = Table.read(filename, hdu=hdu) assert equal_data(t1, self.data1) @pytest.mark.parametrize("hdu", [0, 2, "third"]) def test_read_with_hdu_warning(self, tmp_path, hdu): filename = tmp_path / "test_warn_with_hdu_2.fits" self.hdus2.writeto(filename) with pytest.warns( AstropyDeprecationWarning, match=rf"No table found in specified hdu={hdu}, " r"reading in first available table \(hdu=1\)", ): t2 = Table.read(filename, hdu=hdu) assert equal_data(t2, self.data1) @pytest.mark.parametrize("hdu", [0, 1, "third"]) def test_read_in_last_hdu(self, tmp_path, hdu): filename = tmp_path / "test_warn_with_hdu_3.fits" self.hdus3.writeto(filename) with pytest.warns( AstropyDeprecationWarning, match=rf"No table found in specified hdu={hdu}, " r"reading in first available table \(hdu=2\)", ): t3 = Table.read(filename, hdu=hdu) assert equal_data(t3, self.data2) def test_read_from_hdulist(self): with pytest.warns( AstropyUserWarning, match=r"hdu= was not specified but multiple tables " r"are present, reading in first available " r"table \(hdu=1\)", ): t = Table.read(self.hdus) assert equal_data(t, self.data1) with pytest.warns( AstropyUserWarning, match=r"hdu= was not specified but multiple tables " r"are present, reading in first available " r"table \(hdu=2\)", ): t3 = Table.read(self.hdusb) assert equal_data(t3, self.data2) def test_read_from_hdulist_with_hdu_0(self): with pytest.raises(ValueError) as exc: Table.read(self.hdus, hdu=0) assert exc.value.args[0] == "No table found in hdu=0" @pytest.mark.parametrize("hdu", [1, "first", None]) def test_read_from_hdulist_with_single_table(self, hdu): t = Table.read(self.hdus1, hdu=hdu) assert equal_data(t, self.data1) @pytest.mark.parametrize("hdu", [1, "first"]) def test_read_from_hdulist_with_hdu_1(self, hdu): t = Table.read(self.hdus, hdu=hdu) assert equal_data(t, self.data1) @pytest.mark.parametrize("hdu", [2, "second"]) def test_read_from_hdulist_with_hdu_2(self, hdu): t = Table.read(self.hdus, hdu=hdu) assert equal_data(t, self.data2) @pytest.mark.parametrize("hdu", [3, "third"]) def test_read_from_hdulist_with_hdu_3(self, hdu): with pytest.raises(ValueError, match="No table found in hdu=3"): Table.read(self.hdus, hdu=hdu) @pytest.mark.parametrize("hdu", [0, 2, "third"]) def test_read_from_hdulist_with_hdu_warning(self, hdu): with pytest.warns( 
AstropyDeprecationWarning, match=rf"No table found in specified hdu={hdu}, " r"reading in first available table \(hdu=1\)", ): t2 = Table.read(self.hdus2, hdu=hdu) assert equal_data(t2, self.data1) @pytest.mark.parametrize("hdu", [2, 3, "1", "second", ""]) def test_read_from_hdulist_with_hdu_missing(self, hdu): with pytest.warns( AstropyDeprecationWarning, match=rf"Specified hdu={hdu} not found, " r"reading in first available table \(hdu=1\)", ): t1 = Table.read(self.hdus1, hdu=hdu) assert equal_data(t1, self.data1) @pytest.mark.parametrize("hdu", [0, 1, "third"]) def test_read_from_hdulist_in_last_hdu(self, hdu): with pytest.warns( AstropyDeprecationWarning, match=rf"No table found in specified hdu={hdu}, " r"reading in first available table \(hdu=2\)", ): t3 = Table.read(self.hdus3, hdu=hdu) assert equal_data(t3, self.data2) @pytest.mark.parametrize("hdu", [None, 1, "first"]) def test_read_from_single_hdu(self, hdu): t = Table.read(self.hdus[1]) assert equal_data(t, self.data1) def test_masking_regression_1795(): """ Regression test for #1795 - this bug originally caused columns where TNULL was not defined to have their first element masked. """ t = Table.read(get_pkg_data_filename("data/tb.fits")) assert np.all(t["c1"].mask == np.array([False, False])) assert not hasattr(t["c2"], "mask") assert not hasattr(t["c3"], "mask") assert not hasattr(t["c4"], "mask") assert np.all(t["c1"].data == np.array([1, 2])) assert np.all(t["c2"].data == np.array([b"abc", b"xy "])) assert_allclose(t["c3"].data, np.array([3.70000007153, 6.6999997139])) assert np.all(t["c4"].data == np.array([False, True])) def test_scale_error(): a = [1, 4, 5] b = [2.0, 5.0, 8.2] c = ["x", "y", "z"] t = Table([a, b, c], names=("a", "b", "c"), meta={"name": "first table"}) t["a"].unit = "1.2" with pytest.raises( UnitScaleError, match=r"The column 'a' could not be " r"stored in FITS format because it has a scale '\(1\.2\)'" r" that is not recognized by the FITS standard\. Either " r"scale the data or change the units\.", ): t.write("t.fits", format="fits", overwrite=True) @pytest.mark.parametrize( "tdisp_str, format_return", [ ("EN10.5", ("EN", "10", "5", None)), ("F6.2", ("F", "6", "2", None)), ("B5.10", ("B", "5", "10", None)), ("E10.5E3", ("E", "10", "5", "3")), ("A21", ("A", "21", None, None)), ], ) def test_parse_tdisp_format(tdisp_str, format_return): assert _parse_tdisp_format(tdisp_str) == format_return @pytest.mark.parametrize( "tdisp_str, format_str_return", [ ("G15.4E2", "{:15.4g}"), ("Z5.10", "{:5x}"), ("I6.5", "{:6d}"), ("L8", "{:>8}"), ("E20.7", "{:20.7e}"), ], ) def test_fortran_to_python_format(tdisp_str, format_str_return): assert _fortran_to_python_format(tdisp_str) == format_str_return @pytest.mark.parametrize( "fmt_str, tdisp_str", [ ("{:3d}", "I3"), ("3d", "I3"), ("7.3f", "F7.3"), ("{:>4}", "A4"), ("{:7.4f}", "F7.4"), ("%5.3g", "G5.3"), ("%10s", "A10"), ("%.4f", "F13.4"), ], ) def test_python_to_tdisp(fmt_str, tdisp_str): assert python_to_tdisp(fmt_str) == tdisp_str def test_logical_python_to_tdisp(): assert python_to_tdisp("{:>7}", logical_dtype=True) == "L7" def test_bool_column(tmp_path): """ Regression test for https://github.com/astropy/astropy/issues/1953 Ensures that Table columns of bools are properly written to a FITS table. 
""" arr = np.ones(5, dtype=bool) arr[::2] == np.False_ t = Table([arr]) t.write(tmp_path / "test.fits", overwrite=True) with fits.open(tmp_path / "test.fits") as hdul: assert hdul[1].data["col0"].dtype == np.dtype("bool") assert np.all(hdul[1].data["col0"] == arr) def test_unicode_column(tmp_path): """ Test that a column of unicode strings is still written as one byte-per-character in the FITS table (so long as the column can be ASCII encoded). Regression test for one of the issues fixed in https://github.com/astropy/astropy/pull/4228 """ t = Table([np.array(["a", "b", "cd"])]) t.write(tmp_path / "test.fits", overwrite=True) with fits.open(tmp_path / "test.fits") as hdul: assert np.all(hdul[1].data["col0"] == ["a", "b", "cd"]) assert hdul[1].header["TFORM1"] == "2A" t2 = Table([np.array(["\N{SNOWMAN}"])]) with pytest.raises(UnicodeEncodeError): t2.write(tmp_path / "test.fits", overwrite=True) def test_unit_warnings_read_write(tmp_path): filename = tmp_path / "test_unit.fits" t1 = Table([[1, 2], [3, 4]], names=["a", "b"]) t1["a"].unit = "m/s" t1["b"].unit = "not-a-unit" with pytest.warns( u.UnitsWarning, match="'not-a-unit' did not parse as fits unit" ) as w: t1.write(filename, overwrite=True) assert len(w) == 1 with pytest.warns( u.UnitsWarning, match="'not-a-unit' did not parse as fits unit" ) as w: Table.read(filename, hdu=1) def test_convert_comment_convention(): """ Regression test for https://github.com/astropy/astropy/issues/6079 """ filename = get_pkg_data_filename("data/stddata.fits") with pytest.warns( AstropyUserWarning, match=r"hdu= was not specified but " r"multiple tables are present", ): t = Table.read(filename) assert t.meta["comments"] == [ "", " *** End of mandatory fields ***", "", "", " *** Column names ***", "", "", " *** Column formats ***", "", ] def assert_objects_equal(obj1, obj2, attrs, compare_class=True): if compare_class: assert obj1.__class__ is obj2.__class__ info_attrs = [ "info.name", "info.format", "info.unit", "info.description", "info.meta", "info.dtype", ] for attr in attrs + info_attrs: a1 = obj1 a2 = obj2 for subattr in attr.split("."): try: a1 = getattr(a1, subattr) a2 = getattr(a2, subattr) except AttributeError: a1 = a1[subattr] a2 = a2[subattr] # Mixin info.meta can None instead of empty OrderedDict(), #6720 would # fix this. if attr == "info.meta": if a1 is None: a1 = {} if a2 is None: a2 = {} if isinstance(a1, np.ndarray) and a1.dtype.kind == "f": assert quantity_allclose(a1, a2, rtol=1e-15) elif isinstance(a1, np.dtype): # FITS does not perfectly preserve dtype: byte order can change, and # unicode gets stored as bytes. So, we just check safe casting, to # ensure we do not, e.g., accidentally change integer to float, etc. if NUMPY_LT_1_22 and a1.names: # For old numpy, can_cast does not deal well with structured dtype. assert a1.names == a2.names else: assert np.can_cast(a2, a1, casting="safe") else: assert np.all(a1 == a2) def test_fits_mixins_qtable_to_table(tmp_path): """Test writing as QTable and reading as Table. Ensure correct classes come out. """ filename = tmp_path / "test_simple.fits" names = sorted(mixin_cols) t = QTable([mixin_cols[name] for name in names], names=names) t.write(filename, format="fits") t2 = Table.read(filename, format="fits", astropy_native=True) assert t.colnames == t2.colnames for name, col in t.columns.items(): col2 = t2[name] # Special-case Time, which does not yet support round-tripping # the format. 
if isinstance(col2, Time): col2.format = col.format attrs = compare_attrs[name] compare_class = True if isinstance(col.info, QuantityInfo): # Downgrade Quantity to Column + unit assert type(col2) is Column # Class-specific attributes like `value` or `wrap_angle` are lost. attrs = ["unit"] compare_class = False # Compare data values here (assert_objects_equal doesn't know how in this case) assert np.all(col.value == col2) assert_objects_equal(col, col2, attrs, compare_class) @pytest.mark.parametrize("table_cls", (Table, QTable)) def test_fits_mixins_as_one(table_cls, tmp_path): """Test write/read all cols at once and validate intermediate column names""" filename = tmp_path / "test_simple.fits" names = sorted(mixin_cols) # FITS stores times directly, so we just get the column back. all_serialized_names = [] for name in sorted(mixin_cols): all_serialized_names.extend( [name] if isinstance(mixin_cols[name], Time) else serialized_names[name] ) t = table_cls([mixin_cols[name] for name in names], names=names) t.meta["C"] = "spam" t.meta["comments"] = ["this", "is", "a", "comment"] t.meta["history"] = ["first", "second", "third"] t.write(filename, format="fits") t2 = table_cls.read(filename, format="fits", astropy_native=True) assert t2.meta["C"] == "spam" assert t2.meta["comments"] == ["this", "is", "a", "comment"] assert t2.meta["HISTORY"] == ["first", "second", "third"] assert t.colnames == t2.colnames # Read directly via fits and confirm column names with fits.open(filename) as hdus: assert hdus[1].columns.names == all_serialized_names @pytest.mark.parametrize("name_col", list(mixin_cols.items())) @pytest.mark.parametrize("table_cls", (Table, QTable)) def test_fits_mixins_per_column(table_cls, name_col, tmp_path): """Test write/read one col at a time and do detailed validation""" filename = tmp_path / "test_simple.fits" name, col = name_col c = [1.0, 2.0] t = table_cls([c, col, c], names=["c1", name, "c2"]) t[name].info.description = "my \n\n\n description" t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}} if not t.has_mixin_columns: pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)") t.write(filename, format="fits") t2 = table_cls.read(filename, format="fits", astropy_native=True) if isinstance(col, Time): # FITS Time does not preserve format t2[name].format = col.format assert t.colnames == t2.colnames for colname in t.colnames: compare = ["data"] if colname in ("c1", "c2") else compare_attrs[colname] assert_objects_equal(t[colname], t2[colname], compare) # Special case to make sure Column type doesn't leak into Time class data if name.startswith("tm"): assert t2[name]._time.jd1.__class__ is np.ndarray assert t2[name]._time.jd2.__class__ is np.ndarray @pytest.mark.parametrize("name_col", unsupported_cols.items()) @pytest.mark.xfail(reason="column type unsupported") def test_fits_unsupported_mixin(self, name_col, tmp_path): # Check that we actually fail in writing unsupported columns defined # on top. 
filename = tmp_path / "test_simple.fits" name, col = name_col Table([col], names=[name]).write(filename, format="fits") def test_info_attributes_with_no_mixins(tmp_path): """Even if there are no mixin columns, if there is metadata that would be lost it still gets serialized """ filename = tmp_path / "test.fits" t = Table([[1.0, 2.0]]) t["col0"].description = "hello" * 40 t["col0"].format = "{:8.4f}" t["col0"].meta["a"] = {"b": "c"} t.write(filename, overwrite=True) t2 = Table.read(filename) assert t2["col0"].description == "hello" * 40 assert t2["col0"].format == "{:8.4f}" assert t2["col0"].meta["a"] == {"b": "c"} @pytest.mark.parametrize("method", ["set_cols", "names", "class"]) def test_round_trip_masked_table_serialize_mask(tmp_path, method): """ Same as previous test but set the serialize_method to 'data_mask' so mask is written out and the behavior is all correct. """ filename = tmp_path / "test.fits" t = simple_table(masked=True) # int, float, and str cols with one masked element # MaskedColumn but no masked elements. See table the MaskedColumnInfo class # _represent_as_dict() method for info about we test a column with no masked elements. t["d"] = [1, 2, 3] if method == "set_cols": for col in t.itercols(): col.info.serialize_method["fits"] = "data_mask" t.write(filename) elif method == "names": t.write( filename, serialize_method={ "a": "data_mask", "b": "data_mask", "c": "data_mask", "d": "data_mask", }, ) elif method == "class": t.write(filename, serialize_method="data_mask") t2 = Table.read(filename) assert t2.masked is False assert t2.colnames == t.colnames for name in t2.colnames: assert np.all(t2[name].mask == t[name].mask) assert np.all(t2[name] == t[name]) # Data under the mask round-trips also (unmask data to show this). t[name].mask = False t2[name].mask = False assert np.all(t2[name] == t[name]) def test_meta_not_modified(tmp_path): filename = tmp_path / "test.fits" t = Table(data=[Column([1, 2], "a", description="spam")]) t.meta["comments"] = ["a", "b"] assert len(t.meta) == 1 t.write(filename) assert len(t.meta) == 1 assert t.meta["comments"] == ["a", "b"]
d3c54d1e265c5d114615eda6f853287c5364c6f4712275d27b2f6e1038527302
# Licensed under a 3-clause BSD style license - see PYFITS.rst import errno import gzip import io import mmap import os import pathlib import shutil import sys import urllib.request import zipfile from unittest.mock import patch import numpy as np import pytest from astropy.io import fits from astropy.io.fits.convenience import _getext from astropy.io.fits.diff import FITSDiff from astropy.io.fits.file import GZIP_MAGIC, _File from astropy.io.tests import safeio from astropy.utils import data # NOTE: Python can be built without bz2. from astropy.utils.compat.optional_deps import HAS_BZ2 from astropy.utils.data import conf from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH from .conftest import FitsTestCase if HAS_BZ2: import bz2 class TestCore(FitsTestCase): def test_missing_file(self): with pytest.raises(OSError): fits.open(self.temp("does-not-exist.fits")) def test_naxisj_check(self): with fits.open(self.data("o4sp040b0_raw.fits")) as hdulist: hdulist[1].header["NAXIS3"] = 500 assert "NAXIS3" in hdulist[1].header hdulist.verify("silentfix") assert "NAXIS3" not in hdulist[1].header def test_byteswap(self): p = fits.PrimaryHDU() lst = fits.HDUList() n = np.array([1, 60000, 0], dtype="u2").astype("i2") c = fits.Column(name="foo", format="i2", bscale=1, bzero=32768, array=n) t = fits.BinTableHDU.from_columns([c]) lst.append(p) lst.append(t) lst.writeto(self.temp("test.fits"), overwrite=True) with fits.open(self.temp("test.fits")) as p: assert p[1].data[1]["foo"] == 60000.0 def test_fits_file_path_object(self): """ Testing when fits file is passed as pathlib.Path object #4412. """ fpath = pathlib.Path(self.data("tdim.fits")) with fits.open(fpath) as hdulist: assert hdulist[0].filebytes() == 2880 assert hdulist[1].filebytes() == 5760 with fits.open(self.data("tdim.fits")) as hdulist2: assert FITSDiff(hdulist2, hdulist).identical is True def test_fits_pathlike_object(self): """ Testing when fits file is passed as os.PathLike object #11579. """ class TPath(os.PathLike): def __init__(self, path): self.path = path def __fspath__(self): return str(self.path) fpath = TPath(self.data("tdim.fits")) with fits.open(fpath) as hdulist: assert hdulist[0].filebytes() == 2880 assert hdulist[1].filebytes() == 5760 with fits.open(self.data("tdim.fits")) as hdulist2: assert FITSDiff(hdulist2, hdulist).identical is True def test_fits_file_bytes_object(self): """ Testing when fits file is passed as bytes. 
""" with fits.open(self.data("tdim.fits").encode()) as hdulist: assert hdulist[0].filebytes() == 2880 assert hdulist[1].filebytes() == 5760 with fits.open(self.data("tdim.fits")) as hdulist2: assert FITSDiff(hdulist2, hdulist).identical is True def test_add_del_columns(self): p = fits.ColDefs([]) p.add_col(fits.Column(name="FOO", format="3J")) p.add_col(fits.Column(name="BAR", format="1I")) assert p.names == ["FOO", "BAR"] p.del_col("FOO") assert p.names == ["BAR"] def test_add_del_columns2(self): hdulist = fits.open(self.data("tb.fits")) table = hdulist[1] assert table.data.dtype.names == ("c1", "c2", "c3", "c4") assert table.columns.names == ["c1", "c2", "c3", "c4"] table.columns.del_col("c1") assert table.data.dtype.names == ("c2", "c3", "c4") assert table.columns.names == ["c2", "c3", "c4"] table.columns.del_col("c3") assert table.data.dtype.names == ("c2", "c4") assert table.columns.names == ["c2", "c4"] table.columns.add_col(fits.Column("foo", "3J")) assert table.data.dtype.names == ("c2", "c4", "foo") assert table.columns.names == ["c2", "c4", "foo"] hdulist.writeto(self.temp("test.fits"), overwrite=True) hdulist.close() # NOTE: If you see a warning, might be related to # https://github.com/spacetelescope/PyFITS/issues/44 with fits.open(self.temp("test.fits")) as hdulist: table = hdulist[1] assert table.data.dtype.names == ("c2", "c4", "foo") assert table.columns.names == ["c2", "c4", "foo"] def test_update_header_card(self): """A very basic test for the Header.update method--I'd like to add a few more cases to this at some point. """ header = fits.Header() comment = "number of bits per data pixel" header["BITPIX"] = (16, comment) assert "BITPIX" in header assert header["BITPIX"] == 16 assert header.comments["BITPIX"] == comment header.update(BITPIX=32) assert header["BITPIX"] == 32 assert header.comments["BITPIX"] == "" def test_set_card_value(self): """Similar to test_update_header_card(), but tests the the `header['FOO'] = 'bar'` method of updating card values. """ header = fits.Header() comment = "number of bits per data pixel" card = fits.Card.fromstring(f"BITPIX = 32 / {comment}") header.append(card) header["BITPIX"] = 32 assert "BITPIX" in header assert header["BITPIX"] == 32 assert header.cards[0].keyword == "BITPIX" assert header.cards[0].value == 32 assert header.cards[0].comment == comment def test_uint(self): filename = self.data("o4sp040b0_raw.fits") with fits.open(filename, uint=False) as hdulist_f: with fits.open(filename, uint=True) as hdulist_i: assert hdulist_f[1].data.dtype == np.float32 assert hdulist_i[1].data.dtype == np.uint16 assert np.all(hdulist_f[1].data == hdulist_i[1].data) def test_fix_missing_card_append(self): hdu = fits.ImageHDU() errs = hdu.req_cards("TESTKW", None, None, "foo", "silentfix", []) assert len(errs) == 1 assert "TESTKW" in hdu.header assert hdu.header["TESTKW"] == "foo" assert hdu.header.cards[-1].keyword == "TESTKW" def test_fix_invalid_keyword_value(self): hdu = fits.ImageHDU() hdu.header["TESTKW"] = "foo" errs = hdu.req_cards("TESTKW", None, lambda v: v == "foo", "foo", "ignore", []) assert len(errs) == 0 # Now try a test that will fail, and ensure that an error will be # raised in 'exception' mode errs = hdu.req_cards( "TESTKW", None, lambda v: v == "bar", "bar", "exception", [] ) assert len(errs) == 1 assert errs[0][1] == "'TESTKW' card has invalid value 'foo'." 
# See if fixing will work hdu.req_cards("TESTKW", None, lambda v: v == "bar", "bar", "silentfix", []) assert hdu.header["TESTKW"] == "bar" def test_unfixable_missing_card(self): class TestHDU(fits.hdu.base.NonstandardExtHDU): def _verify(self, option="warn"): errs = super()._verify(option) hdu.req_cards("TESTKW", None, None, None, "fix", errs) return errs @classmethod def match_header(cls, header): # Since creating this HDU class adds it to the registry we # don't want the file reader to possibly think any actual # HDU from a file should be handled by this class return False hdu = TestHDU(header=fits.Header()) with pytest.raises(fits.VerifyError): hdu.verify("fix") def test_exception_on_verification_error(self): hdu = fits.ImageHDU() del hdu.header["XTENSION"] with pytest.raises(fits.VerifyError): hdu.verify("exception") def test_ignore_verification_error(self): hdu = fits.ImageHDU() del hdu.header["NAXIS"] # The default here would be to issue a warning; ensure that no warnings # or exceptions are raised hdu.verify("ignore") # Make sure the error wasn't fixed either, silently or otherwise assert "NAXIS" not in hdu.header def test_unrecognized_verify_option(self): hdu = fits.ImageHDU() with pytest.raises(ValueError): hdu.verify("foobarbaz") def test_errlist_basic(self): # Just some tests to make sure that _ErrList is setup correctly. # No arguments error_list = fits.verify._ErrList() assert error_list == [] # Some contents - this is not actually working, it just makes sure they # are kept. error_list = fits.verify._ErrList([1, 2, 3]) assert error_list == [1, 2, 3] def test_combined_verify_options(self): """ Test verify options like fix+ignore. """ def make_invalid_hdu(): hdu = fits.ImageHDU() # Add one keyword to the header that contains a fixable defect, and one # with an unfixable defect c1 = fits.Card.fromstring("test = ' test'") c2 = fits.Card.fromstring("P.I. = ' Hubble'") hdu.header.append(c1) hdu.header.append(c2) return hdu # silentfix+ignore should be completely silent hdu = make_invalid_hdu() hdu.verify("silentfix+ignore") # silentfix+warn should be quiet about the fixed HDU and only warn # about the unfixable one hdu = make_invalid_hdu() with pytest.warns(AstropyUserWarning, match="Illegal keyword name") as w: hdu.verify("silentfix+warn") assert len(w) == 4 # silentfix+exception should only mention the unfixable error in the # exception hdu = make_invalid_hdu() with pytest.raises(fits.VerifyError, match=r"Illegal keyword name") as excinfo: hdu.verify("silentfix+exception") assert "not upper case" not in str(excinfo.value) # fix+ignore is not too useful, but it should warn about the fixed # problems while saying nothing about the unfixable problems hdu = make_invalid_hdu() with pytest.warns(AstropyUserWarning, match="not upper case") as w: hdu.verify("fix+ignore") assert len(w) == 4 # fix+warn hdu = make_invalid_hdu() with pytest.warns(AstropyUserWarning) as w: hdu.verify("fix+warn") assert len(w) == 6 assert "not upper case" in str(w[2].message) assert "Illegal keyword name" in str(w[4].message) # fix+exception hdu = make_invalid_hdu() with pytest.raises(fits.VerifyError, match=r"Illegal keyword name") as excinfo: hdu.verify("fix+exception") assert "not upper case" in str(excinfo.value) def test_getext(self): """ Test the various different ways of specifying an extension header in the convenience functions. 
""" filename = self.data("test0.fits") hl, ext = _getext(filename, "readonly", 1) assert ext == 1 hl.close() pytest.raises(ValueError, _getext, filename, "readonly", 1, 2) pytest.raises(ValueError, _getext, filename, "readonly", (1, 2)) pytest.raises(ValueError, _getext, filename, "readonly", "sci", "sci") pytest.raises(TypeError, _getext, filename, "readonly", 1, 2, 3) hl, ext = _getext(filename, "readonly", ext=1) assert ext == 1 hl.close() hl, ext = _getext(filename, "readonly", ext=("sci", 2)) assert ext == ("sci", 2) hl.close() pytest.raises( TypeError, _getext, filename, "readonly", 1, ext=("sci", 2), extver=3 ) pytest.raises( TypeError, _getext, filename, "readonly", ext=("sci", 2), extver=3 ) hl, ext = _getext(filename, "readonly", "sci") assert ext == ("sci", 1) hl.close() hl, ext = _getext(filename, "readonly", "sci", 1) assert ext == ("sci", 1) hl.close() hl, ext = _getext(filename, "readonly", ("sci", 1)) assert ext == ("sci", 1) hl.close() hl, ext = _getext( filename, "readonly", "sci", extver=1, do_not_scale_image_data=True ) assert ext == ("sci", 1) hl.close() pytest.raises(TypeError, _getext, filename, "readonly", "sci", ext=1) pytest.raises(TypeError, _getext, filename, "readonly", "sci", 1, extver=2) hl, ext = _getext(filename, "readonly", extname="sci") assert ext == ("sci", 1) hl.close() hl, ext = _getext(filename, "readonly", extname="sci", extver=1) assert ext == ("sci", 1) hl.close() pytest.raises(TypeError, _getext, filename, "readonly", extver=1) def test_extension_name_case_sensitive(self): """ Tests that setting fits.conf.extension_name_case_sensitive at runtime works. """ hdu = fits.ImageHDU() hdu.name = "sCi" assert hdu.name == "SCI" assert hdu.header["EXTNAME"] == "SCI" with fits.conf.set_temp("extension_name_case_sensitive", True): hdu = fits.ImageHDU() hdu.name = "sCi" assert hdu.name == "sCi" assert hdu.header["EXTNAME"] == "sCi" hdu.name = "sCi" assert hdu.name == "SCI" assert hdu.header["EXTNAME"] == "SCI" def test_hdu_fromstring(self): """ Tests creating a fully-formed HDU object from a string containing the bytes of the HDU. """ infile = self.data("test0.fits") outfile = self.temp("test.fits") with open(infile, "rb") as fin: dat = fin.read() offset = 0 with fits.open(infile) as hdul: hdulen = hdul[0]._data_offset + hdul[0]._data_size hdu = fits.PrimaryHDU.fromstring(dat[:hdulen]) assert isinstance(hdu, fits.PrimaryHDU) assert hdul[0].header == hdu.header assert hdu.data is None hdu.header["TEST"] = "TEST" hdu.writeto(outfile) with fits.open(outfile) as hdul: assert isinstance(hdu, fits.PrimaryHDU) assert hdul[0].header[:-1] == hdu.header[:-1] assert hdul[0].header["TEST"] == "TEST" assert hdu.data is None with fits.open(infile) as hdul: for ext_hdu in hdul[1:]: offset += hdulen hdulen = len(str(ext_hdu.header)) + ext_hdu._data_size hdu = fits.ImageHDU.fromstring(dat[offset : offset + hdulen]) assert isinstance(hdu, fits.ImageHDU) assert ext_hdu.header == hdu.header assert (ext_hdu.data == hdu.data).all() def test_nonstandard_hdu(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/157 Tests that "Nonstandard" HDUs with SIMPLE = F are read and written without prepending a superfluous and unwanted standard primary HDU. 
""" data = np.arange(100, dtype=np.uint8) hdu = fits.PrimaryHDU(data=data) hdu.header["SIMPLE"] = False hdu.writeto(self.temp("test.fits")) info = [(0, "", 1, "NonstandardHDU", 5, (), "", "")] with fits.open(self.temp("test.fits")) as hdul: assert hdul.info(output=False) == info # NonstandardHDUs just treat the data as an unspecified array of # bytes. The first 100 bytes should match the original data we # passed in...the rest should be zeros padding out the rest of the # FITS block assert (hdul[0].data[:100] == data).all() assert (hdul[0].data[100:] == 0).all() def test_extname(self): """Test getting/setting the EXTNAME of an HDU.""" h1 = fits.PrimaryHDU() assert h1.name == "PRIMARY" # Normally a PRIMARY HDU should not have an EXTNAME, though it should # have a default .name attribute assert "EXTNAME" not in h1.header # The current version of the FITS standard does allow PRIMARY HDUs to # have an EXTNAME, however. h1.name = "NOTREAL" assert h1.name == "NOTREAL" assert h1.header.get("EXTNAME") == "NOTREAL" # Updating the EXTNAME in the header should update the .name h1.header["EXTNAME"] = "TOOREAL" assert h1.name == "TOOREAL" # If we delete an EXTNAME keyword from a PRIMARY HDU it should go back # to the default del h1.header["EXTNAME"] assert h1.name == "PRIMARY" # For extension HDUs the situation is a bit simpler: h2 = fits.ImageHDU() assert h2.name == "" assert "EXTNAME" not in h2.header h2.name = "HELLO" assert h2.name == "HELLO" assert h2.header.get("EXTNAME") == "HELLO" h2.header["EXTNAME"] = "GOODBYE" assert h2.name == "GOODBYE" def test_extver_extlevel(self): """Test getting/setting the EXTVER and EXTLEVEL of and HDU.""" # EXTVER and EXTNAME work exactly the same; their semantics are, for # now, to be inferred by the user. Although they should never be less # than 1, the standard does not explicitly forbid any value so long as # it's an integer h1 = fits.PrimaryHDU() assert h1.ver == 1 assert h1.level == 1 assert "EXTVER" not in h1.header assert "EXTLEVEL" not in h1.header h1.ver = 2 assert h1.header.get("EXTVER") == 2 h1.header["EXTVER"] = 3 assert h1.ver == 3 del h1.header["EXTVER"] h1.ver == 1 h1.level = 2 assert h1.header.get("EXTLEVEL") == 2 h1.header["EXTLEVEL"] = 3 assert h1.level == 3 del h1.header["EXTLEVEL"] assert h1.level == 1 pytest.raises(TypeError, setattr, h1, "ver", "FOO") pytest.raises(TypeError, setattr, h1, "level", "BAR") def test_consecutive_writeto(self): """ Regression test for an issue where calling writeto twice on the same HDUList could write a corrupted file. https://github.com/spacetelescope/PyFITS/issues/40 is actually a particular instance of this problem, though isn't unique to sys.stdout. 
""" with fits.open(self.data("test0.fits")) as hdul1: # Add a bunch of header keywords so that the data will be forced to # new offsets within the file: for idx in range(40): hdul1[1].header[f"TEST{idx}"] = "test" hdul1.writeto(self.temp("test1.fits")) hdul1.writeto(self.temp("test2.fits")) # Open a second handle to the original file and compare it to hdul1 # (We only compare part of the one header that was modified) # Compare also with the second writeto output with fits.open(self.data("test0.fits")) as hdul2: with fits.open(self.temp("test2.fits")) as hdul3: for hdul in (hdul1, hdul3): for idx, hdus in enumerate(zip(hdul2, hdul)): hdu2, hdu = hdus if idx != 1: assert hdu.header == hdu2.header else: assert hdu2.header == hdu.header[: len(hdu2.header)] assert np.all(hdu.data == hdu2.data) class TestConvenienceFunctions(FitsTestCase): def test_writeto(self, home_is_temp): """ Simple test for writing a trivial header and some data to a file with the `writeto()` convenience function. """ filename = self.temp("array.fits") data = np.zeros((100, 100)) header = fits.Header() fits.writeto(filename, data, header=header, overwrite=True) with fits.open(filename) as hdul: assert len(hdul) == 1 assert (data == hdul[0].data).all() def test_writeto_2(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/107 Test of `writeto()` with a trivial header containing a single keyword. """ filename = self.temp("array.fits") data = np.zeros((100, 100)) header = fits.Header() header.set("CRPIX1", 1.0) fits.writeto( filename, data, header=header, overwrite=True, output_verify="silentfix" ) with fits.open(filename) as hdul: assert len(hdul) == 1 assert (data == hdul[0].data).all() assert "CRPIX1" in hdul[0].header assert hdul[0].header["CRPIX1"] == 1.0 def test_writeto_overwrite(self, home_is_temp): """ Ensure the `overwrite` keyword works as it should """ filename = self.temp("array.fits") data = np.zeros((100, 100)) header = fits.Header() fits.writeto(filename, data, header=header) with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): fits.writeto(filename, data, header=header, overwrite=False) fits.writeto(filename, data, header=header, overwrite=True) with fits.open(filename) as hdul: assert len(hdul) == 1 assert (data == hdul[0].data).all() class TestFileFunctions(FitsTestCase): """ Tests various basic I/O operations, specifically in the astropy.io.fits.file._File class. """ def test_open_nonexistent(self): """Test that trying to open a non-existent file results in an OSError (and not some other arbitrary exception). 
""" with pytest.raises(OSError, match=r"No such file or directory"): fits.open(self.temp("foobar.fits")) # But opening in ostream or append mode should be okay, since they # allow writing new files for mode in ("ostream", "append"): with fits.open(self.temp("foobar.fits"), mode=mode) as _: pass assert os.path.exists(self.temp("foobar.fits")) os.remove(self.temp("foobar.fits")) def test_open_file_handle(self): # Make sure we can open a FITS file from an open file handle with open(self.data("test0.fits"), "rb") as handle: with fits.open(handle) as _: pass with open(self.temp("temp.fits"), "wb") as handle: with fits.open(handle, mode="ostream") as _: pass # Opening without explicitly specifying binary mode should fail with pytest.raises(ValueError): with open(self.data("test0.fits")) as handle: with fits.open(handle) as _: pass # All of these read modes should fail for mode in ["r", "rt"]: with pytest.raises(ValueError): with open(self.data("test0.fits"), mode=mode) as handle: with fits.open(handle) as _: pass # These update or write modes should fail as well for mode in ["w", "wt", "w+", "wt+", "r+", "rt+", "a", "at", "a+", "at+"]: with pytest.raises(ValueError): with open(self.temp("temp.fits"), mode=mode) as handle: with fits.open(handle) as _: pass def test_fits_file_handle_mode_combo(self): # This should work fine since no mode is given with open(self.data("test0.fits"), "rb") as handle: with fits.open(handle) as _: pass # This should work fine since the modes are compatible with open(self.data("test0.fits"), "rb") as handle: with fits.open(handle, mode="readonly") as _: pass # This should not work since the modes conflict with pytest.raises(ValueError): with open(self.data("test0.fits"), "rb") as handle: with fits.open(handle, mode="ostream") as _: pass def test_open_from_url(self): file_url = "file:///" + self.data("test0.fits").lstrip("/") with urllib.request.urlopen(file_url) as urlobj: with fits.open(urlobj) as _: pass # It will not be possible to write to a file that is from a URL object for mode in ("ostream", "append", "update"): with pytest.raises(ValueError): with urllib.request.urlopen(file_url) as urlobj: with fits.open(urlobj, mode=mode) as _: pass @pytest.mark.remote_data(source="astropy") def test_open_from_remote_url(self): for dataurl in (conf.dataurl, conf.dataurl_mirror): remote_url = f"{dataurl}/allsky/allsky_rosat.fits" try: with urllib.request.urlopen(remote_url) as urlobj: with fits.open(urlobj) as fits_handle: assert len(fits_handle) == 1 for mode in ("ostream", "append", "update"): with pytest.raises(ValueError): with urllib.request.urlopen(remote_url) as urlobj: with fits.open(urlobj, mode=mode) as fits_handle: assert len(fits_handle) == 1 except (urllib.error.HTTPError, urllib.error.URLError): continue else: break else: raise Exception("Could not download file") def test_open_gzipped(self): gzip_file = self._make_gzip_file() with fits.open(gzip_file) as fits_handle: assert fits_handle._file.compression == "gzip" assert len(fits_handle) == 5 with fits.open(gzip.GzipFile(gzip_file)) as fits_handle: assert fits_handle._file.compression == "gzip" assert len(fits_handle) == 5 def test_open_gzipped_from_handle(self): with open(self._make_gzip_file(), "rb") as handle: with fits.open(handle) as fits_handle: assert fits_handle._file.compression == "gzip" def test_detect_gzipped(self): """Test detection of a gzip file when the extension is not .gz.""" with fits.open(self._make_gzip_file("test0.fz")) as fits_handle: assert fits_handle._file.compression == "gzip" assert 
len(fits_handle) == 5 def test_writeto_append_mode_gzip(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/33 Check that a new GzipFile opened in append mode can be used to write out a new FITS file. """ # Note: when opening a GzipFile the 'b+' is superfluous, but this was # still how the original test case looked # Note: with statement not supported on GzipFile in older Python # versions fileobj = gzip.GzipFile(self.temp("test.fits.gz"), "ab+") h = fits.PrimaryHDU() try: h.writeto(fileobj) finally: fileobj.close() with fits.open(self.temp("test.fits.gz")) as hdul: assert hdul[0].header == h.header def test_fits_update_mode_gzip(self): """Test updating a GZipped FITS file""" with fits.open(self._make_gzip_file("update.gz"), mode="update") as fits_handle: hdu = fits.ImageHDU(data=[x for x in range(100)]) fits_handle.append(hdu) with fits.open(self.temp("update.gz")) as new_handle: assert len(new_handle) == 6 assert (new_handle[-1].data == [x for x in range(100)]).all() def test_fits_append_mode_gzip(self): """Make sure that attempting to open an existing GZipped FITS file in 'append' mode raises an error""" with pytest.raises(OSError): with fits.open(self._make_gzip_file("append.gz"), mode="append") as _: pass @pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module") def test_open_bzipped(self): bzip_file = self._make_bzip2_file() with fits.open(bzip_file) as fits_handle: assert fits_handle._file.compression == "bzip2" assert len(fits_handle) == 5 with fits.open(bz2.BZ2File(bzip_file)) as fits_handle: assert fits_handle._file.compression == "bzip2" assert len(fits_handle) == 5 @pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module") def test_open_bzipped_from_handle(self): with open(self._make_bzip2_file(), "rb") as handle: with fits.open(handle) as fits_handle: assert fits_handle._file.compression == "bzip2" assert len(fits_handle) == 5 @pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module") def test_detect_bzipped(self): """Test detection of a bzip2 file when the extension is not .bz2.""" with fits.open(self._make_bzip2_file("test0.xx")) as fits_handle: assert fits_handle._file.compression == "bzip2" assert len(fits_handle) == 5 @pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module") def test_writeto_bzip2_fileobj(self): """Test writing to a bz2.BZ2File file like object""" fileobj = bz2.BZ2File(self.temp("test.fits.bz2"), "w") h = fits.PrimaryHDU() try: h.writeto(fileobj) finally: fileobj.close() with fits.open(self.temp("test.fits.bz2")) as hdul: assert hdul[0].header == h.header @pytest.mark.skipif(not HAS_BZ2, reason="Python built without bz2 module") def test_writeto_bzip2_filename(self): """Test writing to a bzip2 file by name""" filename = self.temp("testname.fits.bz2") h = fits.PrimaryHDU() h.writeto(filename) with fits.open(self.temp("testname.fits.bz2")) as hdul: assert hdul[0].header == h.header def test_open_zipped(self): zip_file = self._make_zip_file() with fits.open(zip_file) as fits_handle: assert fits_handle._file.compression == "zip" assert len(fits_handle) == 5 with fits.open(zipfile.ZipFile(zip_file)) as fits_handle: assert fits_handle._file.compression == "zip" assert len(fits_handle) == 5 def test_open_zipped_from_handle(self): with open(self._make_zip_file(), "rb") as handle: with fits.open(handle) as fits_handle: assert fits_handle._file.compression == "zip" assert len(fits_handle) == 5 def test_detect_zipped(self): """Test detection of a zip file when the extension 
is not .zip.""" zf = self._make_zip_file(filename="test0.fz") with fits.open(zf) as fits_handle: assert len(fits_handle) == 5 def test_open_zipped_writeable(self): """Opening zipped files in a writeable mode should fail.""" zf = self._make_zip_file() pytest.raises(OSError, fits.open, zf, "update") pytest.raises(OSError, fits.open, zf, "append") zf = zipfile.ZipFile(zf, "a") pytest.raises(OSError, fits.open, zf, "update") pytest.raises(OSError, fits.open, zf, "append") def test_read_open_astropy_gzip_file(self): """ Regression test for https://github.com/astropy/astropy/issues/2774 This tests reading from a ``GzipFile`` object from Astropy's compatibility copy of the ``gzip`` module. """ gf = gzip.GzipFile(self._make_gzip_file()) try: assert len(fits.open(gf)) == 5 finally: gf.close() def test_open_multiple_member_zipfile(self): """ Opening zip files containing more than one member files should fail as there's no obvious way to specify which file is the FITS file to read. """ zfile = zipfile.ZipFile(self.temp("test0.zip"), "w") zfile.write(self.data("test0.fits")) zfile.writestr("foo", "bar") zfile.close() with pytest.raises(OSError): fits.open(zfile.filename) def test_read_open_file(self): """Read from an existing file object.""" with open(self.data("test0.fits"), "rb") as f: assert len(fits.open(f)) == 5 def test_read_closed_file(self): """Read from an existing file object that's been closed.""" f = open(self.data("test0.fits"), "rb") f.close() with fits.open(f) as f2: assert len(f2) == 5 def test_read_open_gzip_file(self): """Read from an open gzip file object.""" gf = gzip.GzipFile(self._make_gzip_file()) try: assert len(fits.open(gf)) == 5 finally: gf.close() def test_open_gzip_file_for_writing(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/195.""" gf = self._make_gzip_file() with fits.open(gf, mode="update") as h: h[0].header["EXPFLAG"] = "ABNORMAL" h[1].data[0, 0] = 1 with fits.open(gf) as h: # Just to make sure the update worked; if updates work # normal writes should work too... assert h[0].header["EXPFLAG"] == "ABNORMAL" assert h[1].data[0, 0] == 1 def test_write_read_gzip_file(self, home_is_temp): """ Regression test for https://github.com/astropy/astropy/issues/2794 Ensure files written through gzip are readable. """ data = np.arange(100) hdu = fits.PrimaryHDU(data=data) hdu.writeto(self.temp("test.fits.gz")) with open(os.path.expanduser(self.temp("test.fits.gz")), "rb") as f: assert f.read(3) == GZIP_MAGIC with fits.open(self.temp("test.fits.gz")) as hdul: assert np.all(hdul[0].data == data) @pytest.mark.parametrize("ext", ["gz", "bz2", "zip"]) def test_compressed_ext_but_not_compressed(self, ext): testfile = self.temp(f"test0.fits.{ext}") shutil.copy(self.data("test0.fits"), testfile) with fits.open(testfile) as hdul: assert len(hdul) == 5 fits.append(testfile, np.arange(5)) with fits.open(testfile) as hdul: assert len(hdul) == 6 def test_read_file_like_object(self): """Test reading a FITS file from a file-like object.""" filelike = io.BytesIO() with open(self.data("test0.fits"), "rb") as f: filelike.write(f.read()) filelike.seek(0) assert len(fits.open(filelike)) == 5 def test_updated_file_permissions(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/79 Tests that when a FITS file is modified in update mode, the file permissions are preserved. 
""" filename = self.temp("test.fits") hdul = [fits.PrimaryHDU(), fits.ImageHDU()] hdul = fits.HDUList(hdul) hdul.writeto(filename) old_mode = os.stat(filename).st_mode hdul = fits.open(filename, mode="update") hdul.insert(1, fits.ImageHDU()) hdul.flush() hdul.close() assert old_mode == os.stat(filename).st_mode def test_fileobj_mode_guessing(self): """Tests whether a file opened without a specified io.fits mode ('readonly', etc.) is opened in a mode appropriate for the given file object. """ self.copy_file("test0.fits") # Opening in text mode should outright fail for mode in ("r", "w", "a"): with open(self.temp("test0.fits"), mode) as f: pytest.raises(ValueError, fits.HDUList.fromfile, f) # Need to re-copy the file since opening it in 'w' mode blew it away self.copy_file("test0.fits") with open(self.temp("test0.fits"), "rb") as f: with fits.HDUList.fromfile(f) as h: assert h.fileinfo(0)["filemode"] == "readonly" for mode in ("wb", "ab"): with open(self.temp("test0.fits"), mode) as f: with fits.HDUList.fromfile(f) as h: # Basically opening empty files for output streaming assert len(h) == 0 # Need to re-copy the file since opening it in 'w' mode blew it away self.copy_file("test0.fits") with open(self.temp("test0.fits"), "wb+") as f: with fits.HDUList.fromfile(f) as h: # wb+ still causes an existing file to be overwritten so there # are no HDUs assert len(h) == 0 # Need to re-copy the file since opening it in 'w' mode blew it away self.copy_file("test0.fits") with open(self.temp("test0.fits"), "rb+") as f: with fits.HDUList.fromfile(f) as h: assert h.fileinfo(0)["filemode"] == "update" with open(self.temp("test0.fits"), "ab+") as f: with fits.HDUList.fromfile(f) as h: assert h.fileinfo(0)["filemode"] == "append" def test_mmap_unwriteable(self): """Regression test for https://github.com/astropy/astropy/issues/968 Temporarily patches mmap.mmap to exhibit platform-specific bad behavior. """ class MockMmap(mmap.mmap): def flush(self): raise OSError("flush is broken on this platform") old_mmap = mmap.mmap mmap.mmap = MockMmap # Force the mmap test to be rerun _File.__dict__["_mmap_available"]._cache.clear() try: self.copy_file("test0.fits") with pytest.warns( AstropyUserWarning, match=r"mmap\.flush is unavailable" ) as w: with fits.open( self.temp("test0.fits"), mode="update", memmap=True ) as h: h[1].data[0, 0] = 999 assert len(w) == 1 # Double check that writing without mmap still worked with fits.open(self.temp("test0.fits")) as h: assert h[1].data[0, 0] == 999 finally: mmap.mmap = old_mmap _File.__dict__["_mmap_available"]._cache.clear() @pytest.mark.openfiles_ignore def test_mmap_allocate_error(self): """ Regression test for https://github.com/astropy/astropy/issues/1380 Temporarily patches mmap.mmap to raise an OSError if mode is ACCESS_COPY. """ mmap_original = mmap.mmap # We patch mmap here to raise an error if access=mmap.ACCESS_COPY, which # emulates an issue that an OSError is raised if the available address # space is less than the size of the file even if memory mapping is used. 
def mmap_patched(*args, **kwargs): if kwargs.get("access") == mmap.ACCESS_COPY: exc = OSError() exc.errno = errno.ENOMEM raise exc else: return mmap_original(*args, **kwargs) with fits.open(self.data("test0.fits"), memmap=True) as hdulist: with patch.object(mmap, "mmap", side_effect=mmap_patched) as p: with pytest.warns( AstropyUserWarning, match=r"Could not memory " r"map array with mode='readonly'", ): data = hdulist[1].data p.reset_mock() assert not data.flags.writeable def test_mmap_closing(self): """ Tests that the mmap reference is closed/removed when there aren't any HDU data references left. """ if not _File._mmap_available: pytest.xfail("not expected to work on platforms without mmap support") with fits.open(self.data("test0.fits"), memmap=True) as hdul: assert hdul._file._mmap is None hdul[1].data assert hdul._file._mmap is not None del hdul[1].data # Should be no more references to data in the file so close the # mmap assert hdul._file._mmap is None hdul[1].data hdul[2].data del hdul[1].data # hdul[2].data is still references so keep the mmap open assert hdul._file._mmap is not None del hdul[2].data assert hdul._file._mmap is None assert hdul._file._mmap is None with fits.open(self.data("test0.fits"), memmap=True) as hdul: hdul[1].data # When the only reference to the data is on the hdu object, and the # hdulist it belongs to has been closed, the mmap should be closed as # well assert hdul._file._mmap is None with fits.open(self.data("test0.fits"), memmap=True) as hdul: data = hdul[1].data # also make a copy data_copy = data.copy() # The HDUList is closed; in fact, get rid of it completely del hdul # The data array should still work though... assert np.all(data == data_copy) def test_uncloseable_file(self): """ Regression test for https://github.com/astropy/astropy/issues/2356 Demonstrates that FITS files can still be read from file-like objects that don't have an obvious "open" or "closed" state. """ class MyFileLike: def __init__(self, foobar): self._foobar = foobar def read(self, n): return self._foobar.read(n) def seek(self, offset, whence=os.SEEK_SET): self._foobar.seek(offset, whence) def tell(self): return self._foobar.tell() with open(self.data("test0.fits"), "rb") as f: fileobj = MyFileLike(f) with fits.open(fileobj) as hdul1: with fits.open(self.data("test0.fits")) as hdul2: assert hdul1.info(output=False) == hdul2.info(output=False) for hdu1, hdu2 in zip(hdul1, hdul2): assert hdu1.header == hdu2.header if hdu1.data is not None and hdu2.data is not None: assert np.all(hdu1.data == hdu2.data) def test_write_bytesio_discontiguous(self): """ Regression test related to https://github.com/astropy/astropy/issues/2794#issuecomment-55441539 Demonstrates that writing an HDU containing a discontiguous Numpy array should work properly. """ data = np.arange(100)[::3] hdu = fits.PrimaryHDU(data=data) fileobj = io.BytesIO() hdu.writeto(fileobj) fileobj.seek(0) with fits.open(fileobj) as h: assert np.all(h[0].data == data) def test_write_bytesio(self): """ Regression test for https://github.com/astropy/astropy/issues/2463 Test against `io.BytesIO`. `io.StringIO` is not supported. """ self._test_write_string_bytes_io(io.BytesIO()) @pytest.mark.skipif( sys.platform.startswith("win32"), reason="Cannot test on Windows" ) def test_filename_with_colon(self): """ Test reading and writing a file with a colon in the filename. Regression test for https://github.com/astropy/astropy/issues/3122 """ # Skip on Windows since colons in filenames makes NTFS sad. 
filename = "APEXHET.2014-04-01T15:18:01.000.fits" hdu = fits.PrimaryHDU(data=np.arange(10)) hdu.writeto(self.temp(filename)) with fits.open(self.temp(filename)) as hdul: assert np.all(hdul[0].data == hdu.data) def test_writeto_full_disk(self, monkeypatch): """ Test that it gives a readable error when trying to write an hdulist to a full disk. """ def _writeto(self, array): raise OSError("Fake error raised when writing file.") def get_free_space_in_dir(path): return 0 msg = ( "Not enough space on disk: requested 8000, available 0. " "Fake error raised when writing file." ) with pytest.raises(OSError, match=msg) as exc: monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writeto", _writeto) monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir) n = np.arange(0, 1000, dtype="int64") hdu = fits.PrimaryHDU(n) hdulist = fits.HDUList(hdu) filename = self.temp("test.fits") with open(filename, mode="wb") as fileobj: hdulist.writeto(fileobj) def test_flush_full_disk(self, monkeypatch): """ Test that it gives a readable error when trying to update an hdulist to a full disk. """ filename = self.temp("test.fits") hdul = [fits.PrimaryHDU(), fits.ImageHDU()] hdul = fits.HDUList(hdul) hdul[0].data = np.arange(0, 1000, dtype="int64") hdul.writeto(filename) def _writedata(self, fileobj): raise OSError("Fake error raised when writing file.") def get_free_space_in_dir(path): return 0 monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writedata", _writedata) monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir) msg = ( "Not enough space on disk: requested 8000, available 0. " "Fake error raised when writing file." ) with pytest.raises(OSError, match=msg) as exc: with fits.open(filename, mode="update") as hdul: hdul[0].data = np.arange(0, 1000, dtype="int64") hdul.insert(1, fits.ImageHDU()) hdul.flush() def _test_write_string_bytes_io(self, fileobj): """ Implemented for both test_write_stringio and test_write_bytesio. 
""" with fits.open(self.data("test0.fits")) as hdul: hdul.writeto(fileobj) hdul2 = fits.HDUList.fromstring(fileobj.getvalue()) assert FITSDiff(hdul, hdul2).identical def _make_gzip_file(self, filename="test0.fits.gz"): gzfile = self.temp(filename) with open(self.data("test0.fits"), "rb") as f: gz = gzip.open(gzfile, "wb") gz.write(f.read()) gz.close() return gzfile def test_write_overwrite(self, home_is_temp): filename = self.temp("test_overwrite.fits") hdu = fits.PrimaryHDU(data=np.arange(10)) hdu.writeto(filename) with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): hdu.writeto(filename) hdu.writeto(filename, overwrite=True) def _make_zip_file(self, mode="copyonwrite", filename="test0.fits.zip"): zfile = zipfile.ZipFile(self.temp(filename), "w") zfile.write(self.data("test0.fits")) zfile.close() return zfile.filename def _make_bzip2_file(self, filename="test0.fits.bz2"): bzfile = self.temp(filename) with open(self.data("test0.fits"), "rb") as f: bz = bz2.BZ2File(bzfile, "w") bz.write(f.read()) bz.close() return bzfile def test_simulateonly(self): """Write to None simulates writing.""" with fits.open(self.data("test0.fits")) as hdul: hdul.writeto(None) hdul[0].writeto(None) hdul[0].header.tofile(None) def test_bintablehdu_zero_bytes(self): """Make sure we don't have any zero-byte writes in BinTableHDU""" bright = np.rec.array( [ (1, "Sirius", -1.45, "A1V"), (2, "Canopus", -0.73, "F0Ib"), (3, "Rigil Kent", -0.1, "G2V"), ], formats="int16,a20,float32,a10", names="order,name,mag,Sp", ) hdu_non_zero = fits.BinTableHDU(bright) # use safeio, a special file handler meant to fail on zero-byte writes fh = safeio.CatchZeroByteWriter(open(self.temp("bright.fits"), mode="wb")) hdu_non_zero.writeto(fh) fh.close() def test_primaryhdu_zero_bytes(self): """ Make sure we don't have any zero-byte writes from an ImageHDU (or other) of `size % BLOCK_SIZE == 0` """ hdu_img_2880 = fits.PrimaryHDU(data=np.arange(720, dtype="i4")) # use safeio, a special file handler meant to fail on zero-byte writes fh = safeio.CatchZeroByteWriter(open(self.temp("image.fits"), mode="wb")) hdu_img_2880.writeto(fh) fh.close() class TestStreamingFunctions(FitsTestCase): """Test functionality of the StreamingHDU class.""" def test_streaming_hdu(self, home_is_temp): shdu = self._make_streaming_hdu(self.temp("new.fits")) assert isinstance(shdu.size, int) assert shdu.size == 100 arr = np.arange(25, dtype=np.int32).reshape((5, 5)) shdu.write(arr) assert shdu.writecomplete shdu.close() with fits.open(self.temp("new.fits")) as hdul: assert len(hdul) == 1 assert (hdul[0].data == arr).all() def test_streaming_hdu_file_wrong_mode(self): """ Test that streaming an HDU to a file opened in the wrong mode fails as expected. 
""" with pytest.raises(ValueError): with open(self.temp("new.fits"), "wb") as f: header = fits.Header() fits.StreamingHDU(f, header) def test_streaming_hdu_write_file(self): """Test streaming an HDU to an open file object.""" arr = np.zeros((5, 5), dtype=np.int32) with open(self.temp("new.fits"), "ab+") as f: shdu = self._make_streaming_hdu(f) shdu.write(arr) assert shdu.writecomplete assert shdu.size == 100 with fits.open(self.temp("new.fits")) as hdul: assert len(hdul) == 1 assert (hdul[0].data == arr).all() def test_streaming_hdu_write_file_like(self): """Test streaming an HDU to an open file-like object.""" arr = np.zeros((5, 5), dtype=np.int32) # The file-like object underlying a StreamingHDU must be in binary mode sf = io.BytesIO() shdu = self._make_streaming_hdu(sf) shdu.write(arr) assert shdu.writecomplete assert shdu.size == 100 sf.seek(0) hdul = fits.open(sf) assert len(hdul) == 1 assert (hdul[0].data == arr).all() def test_streaming_hdu_append_extension(self): arr = np.zeros((5, 5), dtype=np.int32) with open(self.temp("new.fits"), "ab+") as f: shdu = self._make_streaming_hdu(f) shdu.write(arr) # Doing this again should update the file with an extension with open(self.temp("new.fits"), "ab+") as f: shdu = self._make_streaming_hdu(f) shdu.write(arr) def test_fix_invalid_extname(self, capsys): phdu = fits.PrimaryHDU() ihdu = fits.ImageHDU() ihdu.header["EXTNAME"] = 12345678 hdul = fits.HDUList([phdu, ihdu]) filename = self.temp("temp.fits") pytest.raises( fits.VerifyError, hdul.writeto, filename, output_verify="exception" ) with pytest.warns( fits.verify.VerifyWarning, match=r"Verification reported errors" ): hdul.writeto(filename, output_verify="fix") with fits.open(filename): assert hdul[1].name == "12345678" assert hdul[1].header["EXTNAME"] == "12345678" hdul.close() def _make_streaming_hdu(self, fileobj): hd = fits.Header() hd["SIMPLE"] = (True, "conforms to FITS standard") hd["BITPIX"] = (32, "array data type") hd["NAXIS"] = (2, "number of array dimensions") hd["NAXIS1"] = 5 hd["NAXIS2"] = 5 hd["EXTEND"] = True return fits.StreamingHDU(fileobj, hd) def test_blank_ignore(self): with fits.open(self.data("blank.fits"), ignore_blank=True) as f: assert f[0].data.flat[0] == 2 def test_error_if_memmap_impossible(self): pth = self.data("blank.fits") with fits.open(pth, memmap=True) as hdul: with pytest.raises(ValueError): hdul[0].data # However, it should not fail if do_not_scale_image_data was used: # See https://github.com/astropy/astropy/issues/3766 with fits.open(pth, memmap=True, do_not_scale_image_data=True) as hdul: hdul[0].data # Just make sure it doesn't crash
# Licensed under a 3-clause BSD style license - see PYFITS.rst import math import os import re import time import numpy as np import pytest from numpy.testing import assert_equal from astropy.io import fits from astropy.io.fits.hdu.compressed import DITHER_SEED_CHECKSUM, SUBTRACTIVE_DITHER_1 from astropy.utils.data import download_file, get_pkg_data_filename from astropy.utils.exceptions import AstropyUserWarning from .conftest import FitsTestCase from .test_table import comparerecords class TestImageFunctions(FitsTestCase): def test_constructor_name_arg(self): """Like the test of the same name in test_table.py""" hdu = fits.ImageHDU() assert hdu.name == "" assert "EXTNAME" not in hdu.header hdu.name = "FOO" assert hdu.name == "FOO" assert hdu.header["EXTNAME"] == "FOO" # Passing name to constructor hdu = fits.ImageHDU(name="FOO") assert hdu.name == "FOO" assert hdu.header["EXTNAME"] == "FOO" # And overriding a header with a different extname hdr = fits.Header() hdr["EXTNAME"] = "EVENTS" hdu = fits.ImageHDU(header=hdr, name="FOO") assert hdu.name == "FOO" assert hdu.header["EXTNAME"] == "FOO" def test_constructor_ver_arg(self): def assert_ver_is(hdu, reference_ver): assert hdu.ver == reference_ver assert hdu.header["EXTVER"] == reference_ver hdu = fits.ImageHDU() assert hdu.ver == 1 # defaults to 1 assert "EXTVER" not in hdu.header hdu.ver = 1 assert_ver_is(hdu, 1) # Passing name to constructor hdu = fits.ImageHDU(ver=2) assert_ver_is(hdu, 2) # And overriding a header with a different extver hdr = fits.Header() hdr["EXTVER"] = 3 hdu = fits.ImageHDU(header=hdr, ver=4) assert_ver_is(hdu, 4) # The header card is not overridden if ver is None or not passed in hdr = fits.Header() hdr["EXTVER"] = 5 hdu = fits.ImageHDU(header=hdr, ver=None) assert_ver_is(hdu, 5) hdu = fits.ImageHDU(header=hdr) assert_ver_is(hdu, 5) def test_constructor_copies_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153 Ensure that a header from one HDU is copied when used to initialize new HDU. """ ifd = fits.HDUList(fits.PrimaryHDU()) phdr = ifd[0].header phdr["FILENAME"] = "labq01i3q_rawtag.fits" primary_hdu = fits.PrimaryHDU(header=phdr) ofd = fits.HDUList(primary_hdu) ofd[0].header["FILENAME"] = "labq01i3q_flt.fits" # Original header should be unchanged assert phdr["FILENAME"] == "labq01i3q_rawtag.fits" def test_open(self): # The function "open" reads a FITS file into an HDUList object. There # are three modes to open: "readonly" (the default), "append", and # "update". # Open a file read-only (the default mode), the content of the FITS # file are read into memory. r = fits.open(self.data("test0.fits")) # readonly # data parts are latent instantiation, so if we close the HDUList # without touching data, data can not be accessed. 
r.close() with pytest.raises(IndexError) as exc_info: r[1].data[:2, :2] # Check that the exception message is the enhanced version, not the # default message from list.__getitem__ assert str(exc_info.value) == ( "HDU not found, possibly because the index " "is out of range, or because the file was " "closed before all HDUs were read" ) def test_open_2(self): r = fits.open(self.data("test0.fits")) info = [(0, "PRIMARY", 1, "PrimaryHDU", 138, (), "", "")] + [ (x, "SCI", x, "ImageHDU", 61, (40, 40), "int16", "") for x in range(1, 5) ] try: assert r.info(output=False) == info finally: r.close() def test_open_3(self): # Test that HDUs cannot be accessed after the file was closed r = fits.open(self.data("test0.fits")) r.close() with pytest.raises(IndexError) as exc_info: r[1] # Check that the exception message is the enhanced version, not the # default message from list.__getitem__ assert str(exc_info.value) == ( "HDU not found, possibly because the index " "is out of range, or because the file was " "closed before all HDUs were read" ) # Test that HDUs can be accessed with lazy_load_hdus=False r = fits.open(self.data("test0.fits"), lazy_load_hdus=False) r.close() assert isinstance(r[1], fits.ImageHDU) assert len(r) == 5 with pytest.raises(IndexError) as exc_info: r[6] assert str(exc_info.value) == "list index out of range" # And the same with the global config item assert fits.conf.lazy_load_hdus # True by default fits.conf.lazy_load_hdus = False try: r = fits.open(self.data("test0.fits")) r.close() assert isinstance(r[1], fits.ImageHDU) assert len(r) == 5 finally: fits.conf.lazy_load_hdus = True def test_fortran_array(self): # Test that files are being correctly written+read for "C" and "F" order arrays a = np.arange(21).reshape(3, 7) b = np.asfortranarray(a) afits = self.temp("a_str.fits") bfits = self.temp("b_str.fits") # writing to str specified files fits.PrimaryHDU(data=a).writeto(afits) fits.PrimaryHDU(data=b).writeto(bfits) np.testing.assert_array_equal(fits.getdata(afits), a) np.testing.assert_array_equal(fits.getdata(bfits), a) # writing to fileobjs aafits = self.temp("a_fileobj.fits") bbfits = self.temp("b_fileobj.fits") with open(aafits, mode="wb") as fd: fits.PrimaryHDU(data=a).writeto(fd) with open(bbfits, mode="wb") as fd: fits.PrimaryHDU(data=b).writeto(fd) np.testing.assert_array_equal(fits.getdata(aafits), a) np.testing.assert_array_equal(fits.getdata(bbfits), a) def test_fortran_array_non_contiguous(self): # Test that files are being correctly written+read for 'C' and 'F' order arrays a = np.arange(105).reshape(3, 5, 7) b = np.asfortranarray(a) # writing to str specified files afits = self.temp("a_str_slice.fits") bfits = self.temp("b_str_slice.fits") fits.PrimaryHDU(data=a[::2, ::2]).writeto(afits) fits.PrimaryHDU(data=b[::2, ::2]).writeto(bfits) np.testing.assert_array_equal(fits.getdata(afits), a[::2, ::2]) np.testing.assert_array_equal(fits.getdata(bfits), a[::2, ::2]) # writing to fileobjs aafits = self.temp("a_fileobj_slice.fits") bbfits = self.temp("b_fileobj_slice.fits") with open(aafits, mode="wb") as fd: fits.PrimaryHDU(data=a[::2, ::2]).writeto(fd) with open(bbfits, mode="wb") as fd: fits.PrimaryHDU(data=b[::2, ::2]).writeto(fd) np.testing.assert_array_equal(fits.getdata(aafits), a[::2, ::2]) np.testing.assert_array_equal(fits.getdata(bbfits), a[::2, ::2]) def test_primary_with_extname(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151 Tests that the EXTNAME keyword works with Primary HDUs as well, and interacts properly with the .name 
attribute. For convenience hdulist['PRIMARY'] will still refer to the first HDU even if it has an EXTNAME not equal to 'PRIMARY'. """ prihdr = fits.Header([("EXTNAME", "XPRIMARY"), ("EXTVER", 1)]) hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)]) assert "EXTNAME" in hdul[0].header assert hdul[0].name == "XPRIMARY" assert hdul[0].name == hdul[0].header["EXTNAME"] info = [(0, "XPRIMARY", 1, "PrimaryHDU", 5, (), "", "")] assert hdul.info(output=False) == info assert hdul["PRIMARY"] is hdul["XPRIMARY"] assert hdul["PRIMARY"] is hdul[("XPRIMARY", 1)] hdul[0].name = "XPRIMARY2" assert hdul[0].header["EXTNAME"] == "XPRIMARY2" hdul.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul: assert hdul[0].name == "XPRIMARY2" def test_io_manipulation(self): # Get a keyword value. An extension can be referred by name or by # number. Both extension and keyword names are case insensitive. with fits.open(self.data("test0.fits")) as r: assert r["primary"].header["naxis"] == 0 assert r[0].header["naxis"] == 0 # If there are more than one extension with the same EXTNAME value, # the EXTVER can be used (as the second argument) to distinguish # the extension. assert r["sci", 1].header["detector"] == 1 # append (using "update()") a new card r[0].header["xxx"] = 1.234e56 assert ( "\n".join(str(x) for x in r[0].header.cards[-3:]) == "EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n" "FILENAME= 'vtest3.fits' / File name \n" "XXX = 1.234E+56 " ) # rename a keyword r[0].header.rename_keyword("filename", "fname") pytest.raises(ValueError, r[0].header.rename_keyword, "fname", "history") pytest.raises(ValueError, r[0].header.rename_keyword, "fname", "simple") r[0].header.rename_keyword("fname", "filename") # get a subsection of data assert np.array_equal( r[2].data[:3, :3], np.array( [[349, 349, 348], [349, 349, 347], [347, 350, 349]], dtype=np.int16 ), ) # We can create a new FITS file by opening a new file with "append" # mode. with fits.open(self.temp("test_new.fits"), mode="append") as n: # Append the primary header and the 2nd extension to the new # file. n.append(r[0]) n.append(r[2]) # The flush method will write the current HDUList object back # to the newly created file on disk. The HDUList is still open # and can be further operated. n.flush() assert n[1].data[1, 1] == 349 # modify a data point n[1].data[1, 1] = 99 # When the file is closed, the most recent additions of # extension(s) since last flush() will be appended, but any HDU # already existed at the last flush will not be modified del n # If an existing file is opened with "append" mode, like the # readonly mode, the HDU's will be read into the HDUList which can # be modified in memory but can not be written back to the original # file. A file opened with append mode can only add new HDU's. os.rename(self.temp("test_new.fits"), self.temp("test_append.fits")) with fits.open(self.temp("test_append.fits"), mode="append") as a: # The above change did not take effect since this was made # after the flush(). assert a[1].data[1, 1] == 349 a.append(r[1]) del a # When changes are made to an HDUList which was opened with # "update" mode, they will be written back to the original file # when a flush/close is called. os.rename(self.temp("test_append.fits"), self.temp("test_update.fits")) with fits.open(self.temp("test_update.fits"), mode="update") as u: # When the changes do not alter the size structures of the # original (or since last flush) HDUList, the changes are # written back "in place". 
assert u[0].header["rootname"] == "U2EQ0201T" u[0].header["rootname"] = "abc" assert u[1].data[1, 1] == 349 u[1].data[1, 1] = 99 u.flush() # If the changes affect the size structure, e.g. adding or # deleting HDU(s), header was expanded or reduced beyond # existing number of blocks (2880 bytes in each block), or # change the data size, the HDUList is written to a temporary # file, the original file is deleted, and the temporary file is # renamed to the original file name and reopened in the update # mode. To a user, these two kinds of updating writeback seem # to be the same, unless the optional argument in flush or # close is set to 1. del u[2] u.flush() # The write method in HDUList class writes the current HDUList, # with all changes made up to now, to a new file. This method # works the same disregard the mode the HDUList was opened # with. u.append(r[3]) u.writeto(self.temp("test_new.fits")) del u # Another useful new HDUList method is readall. It will "touch" the # data parts in all HDUs, so even if the HDUList is closed, we can # still operate on the data. with fits.open(self.data("test0.fits")) as r: r.readall() assert r[1].data[1, 1] == 315 # create an HDU with data only data = np.ones((3, 5), dtype=np.float32) hdu = fits.ImageHDU(data=data, name="SCI") assert np.array_equal( hdu.data, np.array( [ [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], ], dtype=np.float32, ), ) # create an HDU with header and data # notice that the header has the right NAXIS's since it is constructed # with ImageHDU hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2], dtype="int32")) assert ( "\n".join(str(x) for x in hdu2.header.cards[1:5]) == "BITPIX = 32 / array data type \n" "NAXIS = 1 / number of array dimensions \n" "NAXIS1 = 2 \n" "PCOUNT = 0 / number of parameters " ) def test_memory_mapping(self): # memory mapping f1 = fits.open(self.data("test0.fits"), memmap=1) f1.close() def test_verification_on_output(self): # verification on output # make a defect HDUList first x = fits.ImageHDU() hdu = fits.HDUList(x) # HDUList can take a list or one single HDU with pytest.warns( AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\." ) as w: hdu.verify() assert len(w) == 3 with pytest.warns( AstropyUserWarning, match=r"HDUList's 0th element is not a primary HDU\. 
" r"Fixed by inserting one as 0th HDU\.", ) as w: hdu.writeto(self.temp("test_new2.fits"), "fix") assert len(w) == 3 def test_section(self): # section testing fs = fits.open(self.data("arange.fits")) assert np.array_equal(fs[0].section[3, 2, 5], 357) assert np.array_equal( fs[0].section[3, 2, :], np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]), ) assert np.array_equal( fs[0].section[3, 2, 4:], np.array([356, 357, 358, 359, 360, 361, 362]) ) assert np.array_equal( fs[0].section[3, 2, :8], np.array([352, 353, 354, 355, 356, 357, 358, 359]) ) assert np.array_equal( fs[0].section[3, 2, -8:8], np.array([355, 356, 357, 358, 359]) ) assert np.array_equal( fs[0].section[3, 2:5, :], np.array( [ [352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362], [363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373], [374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384], ] ), ) assert np.array_equal( fs[0].section[3, :, :][:3, :3], np.array([[330, 331, 332], [341, 342, 343], [352, 353, 354]]), ) dat = fs[0].data assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8]) assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3]) assert np.array_equal( fs[0].section[3:6, :, :][:3, :3, :3], np.array( [ [[330, 331, 332], [341, 342, 343], [352, 353, 354]], [[440, 441, 442], [451, 452, 453], [462, 463, 464]], [[550, 551, 552], [561, 562, 563], [572, 573, 574]], ] ), ) assert np.array_equal( fs[0].section[:, :, :][:3, :2, :2], np.array( [[[0, 1], [11, 12]], [[110, 111], [121, 122]], [[220, 221], [231, 232]]] ), ) assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :]) assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :]) assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :]) assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :]) assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2]) assert np.array_equal(fs[0].section[:, [1, 2, 4], 3], dat[:, [1, 2, 4], 3]) bool_index = np.array( [True, False, True, True, False, False, True, True, False, True] ) assert np.array_equal(fs[0].section[:, bool_index, :], dat[:, bool_index, :]) assert np.array_equal(fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...]) assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2]) assert np.array_equal(fs[0].section[..., [1, 2, 4], 3], dat[..., [1, 2, 4], 3]) # Can we use negative indices? 
assert np.array_equal(fs[0].section[-1], dat[-1]) assert np.array_equal(fs[0].section[-9:-7], dat[-9:-7]) assert np.array_equal(fs[0].section[-4, -6:-3, -1], dat[-4, -6:-3, -1]) fs.close() def test_section_data_single(self): a = np.array([1]) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp("test_new.fits")) hdul = fits.open(self.temp("test_new.fits")) sec = hdul[0].section dat = hdul[0].data assert np.array_equal(sec[0], dat[0]) assert np.array_equal(sec[...], dat[...]) assert np.array_equal(sec[..., 0], dat[..., 0]) assert np.array_equal(sec[0, ...], dat[0, ...]) hdul.close() def test_section_data_square(self): a = np.arange(4).reshape(2, 2) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp("test_new.fits")) hdul = fits.open(self.temp("test_new.fits")) d = hdul[0] dat = hdul[0].data assert (d.section[:, :] == dat[:, :]).all() assert (d.section[0, :] == dat[0, :]).all() assert (d.section[1, :] == dat[1, :]).all() assert (d.section[:, 0] == dat[:, 0]).all() assert (d.section[:, 1] == dat[:, 1]).all() assert (d.section[0, 0] == dat[0, 0]).all() assert (d.section[0, 1] == dat[0, 1]).all() assert (d.section[1, 0] == dat[1, 0]).all() assert (d.section[1, 1] == dat[1, 1]).all() assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all() assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all() assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all() assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all() hdul.close() def test_section_data_cube(self): a = np.arange(18).reshape(2, 3, 3) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp("test_new.fits")) hdul = fits.open(self.temp("test_new.fits")) d = hdul[0] dat = hdul[0].data assert (d.section[:] == dat[:]).all() assert (d.section[:, :] == dat[:, :]).all() # Test that various combinations of indexing on the section are equal to # indexing the data. # Testing all combinations of scalar-index and [:] for each dimension. for idx1 in [slice(None), 0, 1]: for idx2 in [slice(None), 0, 1, 2]: for idx3 in [slice(None), 0, 1, 2]: nd_idx = (idx1, idx2, idx3) assert (d.section[nd_idx] == dat[nd_idx]).all() # Test all ways to slice the last dimension but keeping the first two. for idx3 in [ slice(0, 1), slice(0, 2), slice(0, 3), slice(1, 2), slice(1, 3), slice(2, 3), ]: nd_idx = (slice(None), slice(None), idx3) assert (d.section[nd_idx] == dat[nd_idx]).all() # Test various combinations (not exhaustive) to slice all dimensions. 
for idx1 in [slice(0, 1), slice(1, 2)]: for idx2 in [ slice(0, 1), slice(0, 2), slice(0, 3), slice(1, 2), slice(1, 3), ]: for idx3 in [ slice(0, 1), slice(0, 2), slice(0, 3), slice(1, 2), slice(1, 3), slice(2, 3), ]: nd_idx = (idx1, idx2, idx3) assert (d.section[nd_idx] == dat[nd_idx]).all() hdul.close() def test_section_data_four(self): a = np.arange(256).reshape(4, 4, 4, 4) hdu = fits.PrimaryHDU(a) hdu.writeto(self.temp("test_new.fits")) hdul = fits.open(self.temp("test_new.fits")) d = hdul[0] dat = hdul[0].data assert (d.section[:, :, :, :] == dat[:, :, :, :]).all() assert (d.section[:, :, :] == dat[:, :, :]).all() assert (d.section[:, :] == dat[:, :]).all() assert (d.section[:] == dat[:]).all() assert (d.section[0, :, :, :] == dat[0, :, :, :]).all() assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all() assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all() assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all() assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all() hdul.close() def test_section_data_scaled(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143 This is like test_section_data_square but uses a file containing scaled image data, to test that sections can work correctly with scaled data. """ hdul = fits.open(self.data("scale.fits")) d = hdul[0] dat = hdul[0].data assert (d.section[:, :] == dat[:, :]).all() assert (d.section[0, :] == dat[0, :]).all() assert (d.section[1, :] == dat[1, :]).all() assert (d.section[:, 0] == dat[:, 0]).all() assert (d.section[:, 1] == dat[:, 1]).all() assert (d.section[0, 0] == dat[0, 0]).all() assert (d.section[0, 1] == dat[0, 1]).all() assert (d.section[1, 0] == dat[1, 0]).all() assert (d.section[1, 1] == dat[1, 1]).all() assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all() assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all() assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all() assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all() hdul.close() # Test without having accessed the full data first hdul = fits.open(self.data("scale.fits")) d = hdul[0] assert (d.section[:, :] == dat[:, :]).all() assert (d.section[0, :] == dat[0, :]).all() assert (d.section[1, :] == dat[1, :]).all() assert (d.section[:, 0] == dat[:, 0]).all() assert (d.section[:, 1] == dat[:, 1]).all() assert (d.section[0, 0] == dat[0, 0]).all() assert (d.section[0, 1] == dat[0, 1]).all() assert (d.section[1, 0] == dat[1, 0]).all() assert (d.section[1, 1] == dat[1, 1]).all() assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all() assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all() assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all() assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all() assert not d._data_loaded hdul.close() def test_do_not_scale_image_data(self): with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul: assert hdul[0].data.dtype == np.dtype(">i2") with fits.open(self.data("scale.fits")) as hdul: assert hdul[0].data.dtype == np.dtype("float32") def test_append_uint_data(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56 (BZERO and BSCALE added in the wrong location when appending scaled data) """ fits.writeto(self.temp("test_new.fits"), data=np.array([], dtype="uint8")) d = np.zeros([100, 100]).astype("uint16") fits.append(self.temp("test_new.fits"), data=d) with fits.open(self.temp("test_new.fits"), uint=True) as f: assert f[1].data.dtype == "uint16" def test_scale_with_explicit_bzero_bscale(self): """ Regression test for https://github.com/astropy/astropy/issues/6399 """ hdu2 = 
fits.ImageHDU(np.random.rand(100, 100)) # The line below raised an exception in astropy 2.0, so if it does not # raise an error here, that is progress. hdu2.scale(type="uint8", bscale=1, bzero=0) def test_uint_header_consistency(self): """ Regression test for https://github.com/astropy/astropy/issues/2305 This ensures that an HDU containing unsigned integer data always has the appropriate BZERO value in its header. """ for int_size in (16, 32, 64): # Just make an array of some unsigned ints that wouldn't fit in a # signed int array of the same bit width max_uint = (2**int_size) - 1 if int_size == 64: max_uint = np.uint64(int_size) dtype = f"uint{int_size}" arr = np.empty(100, dtype=dtype) arr.fill(max_uint) arr -= np.arange(100, dtype=dtype) uint_hdu = fits.PrimaryHDU(data=arr) assert np.all(uint_hdu.data == arr) assert uint_hdu.data.dtype.name == f"uint{int_size}" assert "BZERO" in uint_hdu.header assert uint_hdu.header["BZERO"] == (2 ** (int_size - 1)) filename = f"uint{int_size}.fits" uint_hdu.writeto(self.temp(filename)) with fits.open(self.temp(filename), uint=True) as hdul: new_uint_hdu = hdul[0] assert np.all(new_uint_hdu.data == arr) assert new_uint_hdu.data.dtype.name == f"uint{int_size}" assert "BZERO" in new_uint_hdu.header assert new_uint_hdu.header["BZERO"] == (2 ** (int_size - 1)) @pytest.mark.parametrize(("from_file"), (False, True)) @pytest.mark.parametrize(("do_not_scale"), (False,)) def test_uint_header_keywords_removed_after_bitpix_change( self, from_file, do_not_scale ): """ Regression test for https://github.com/astropy/astropy/issues/4974 BZERO/BSCALE should be removed if data is converted to a floating point type. Currently excluding the case where do_not_scale_image_data=True because it is not clear what the expectation should be. """ arr = np.zeros(100, dtype="uint16") if from_file: # To generate the proper input file we always want to scale the # data before writing it...otherwise when we open it will be # regular (signed) int data. tmp_uint = fits.PrimaryHDU(arr) filename = "unsigned_int.fits" tmp_uint.writeto(self.temp(filename)) with fits.open( self.temp(filename), do_not_scale_image_data=do_not_scale ) as f: uint_hdu = f[0] # Force a read before we close. _ = uint_hdu.data else: uint_hdu = fits.PrimaryHDU(arr, do_not_scale_image_data=do_not_scale) # Make sure appropriate keywords are in the header. See # https://github.com/astropy/astropy/pull/3916#issuecomment-122414532 # for discussion. assert "BSCALE" in uint_hdu.header assert "BZERO" in uint_hdu.header assert uint_hdu.header["BSCALE"] == 1 assert uint_hdu.header["BZERO"] == 32768 # Convert data to floating point... uint_hdu.data = uint_hdu.data * 1.0 # ...bitpix should be negative. assert uint_hdu.header["BITPIX"] < 0 # BSCALE and BZERO should NOT be in header any more. assert "BSCALE" not in uint_hdu.header assert "BZERO" not in uint_hdu.header # This is the main test...the data values should round trip # as zero. filename = "test_uint_to_float.fits" uint_hdu.writeto(self.temp(filename)) with fits.open(self.temp(filename)) as hdul: assert (hdul[0].data == 0).all() def test_blanks(self): """Test image data with blank spots in it (which should show up as NaNs in the data array. 
""" arr = np.zeros((10, 10), dtype=np.int32) # One row will be blanks arr[1] = 999 hdu = fits.ImageHDU(data=arr) hdu.header["BLANK"] = 999 hdu.writeto(self.temp("test_new.fits")) with fits.open(self.temp("test_new.fits")) as hdul: assert np.isnan(hdul[1].data[1]).all() def test_invalid_blanks(self): """ Test that invalid use of the BLANK keyword leads to an appropriate warning, and that the BLANK keyword is ignored when returning the HDU data. Regression test for https://github.com/astropy/astropy/issues/3865 """ arr = np.arange(5, dtype=np.float64) hdu = fits.PrimaryHDU(data=arr) hdu.header["BLANK"] = 2 with pytest.warns( AstropyUserWarning, match="Invalid 'BLANK' keyword in header" ) as w: hdu.writeto(self.temp("test_new.fits")) # Allow the HDU to be written, but there should be a warning # when writing a header with BLANK when then data is not # int assert len(w) == 1 # Should also get a warning when opening the file, and the BLANK # value should not be applied with pytest.warns( AstropyUserWarning, match="Invalid 'BLANK' keyword in header" ) as w: with fits.open(self.temp("test_new.fits")) as h: assert np.all(arr == h[0].data) assert len(w) == 1 @pytest.mark.filterwarnings("ignore:Invalid 'BLANK' keyword in header") def test_scale_back_with_blanks(self): """ Test that when auto-rescaling integer data with "blank" values (where the blanks are replaced by NaN in the float data), that the "BLANK" keyword is removed from the header. Further, test that when using the ``scale_back=True`` option the blank values are restored properly. Regression test for https://github.com/astropy/astropy/issues/3865 """ # Make the sample file arr = np.arange(5, dtype=np.int32) hdu = fits.PrimaryHDU(data=arr) hdu.scale("int16", bscale=1.23) # Creating data that uses BLANK is currently kludgy--a separate issue # TODO: Rewrite this test when scaling with blank support is better # supported # Let's just add a value to the data that should be converted to NaN # when it is read back in: filename = self.temp("test.fits") hdu.data[0] = 9999 hdu.header["BLANK"] = 9999 hdu.writeto(filename) with fits.open(filename) as hdul: data = hdul[0].data assert np.isnan(data[0]) with pytest.warns( fits.verify.VerifyWarning, match=r"Invalid 'BLANK' keyword in header" ): hdul.writeto(self.temp("test2.fits")) # Now reopen the newly written file. It should not have a 'BLANK' # keyword with fits.open(self.temp("test2.fits")) as hdul2: assert "BLANK" not in hdul2[0].header data = hdul2[0].data assert np.isnan(data[0]) # Finally, test that scale_back keeps the BLANKs correctly with fits.open(filename, scale_back=True, mode="update") as hdul3: data = hdul3[0].data # This emits warning that pytest cannot catch properly, so we # catch it with pytest.mark.filterwarnings above. assert np.isnan(data[0]) with fits.open(filename, do_not_scale_image_data=True) as hdul4: assert hdul4[0].header["BLANK"] == 9999 assert hdul4[0].header["BSCALE"] == 1.23 assert hdul4[0].data[0] == 9999 def test_bzero_with_floats(self): """Test use of the BZERO keyword in an image HDU containing float data. 
""" arr = np.zeros((10, 10)) - 1 hdu = fits.ImageHDU(data=arr) hdu.header["BZERO"] = 1.0 hdu.writeto(self.temp("test_new.fits")) with fits.open(self.temp("test_new.fits")) as hdul: arr += 1 assert (hdul[1].data == arr).all() def test_rewriting_large_scaled_image(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101 """ hdul = fits.open(self.data("fixed-1890.fits")) orig_data = hdul[0].data hdul.writeto(self.temp("test_new.fits"), overwrite=True) hdul.close() hdul = fits.open(self.temp("test_new.fits")) assert (hdul[0].data == orig_data).all() hdul.close() # Just as before, but this time don't touch hdul[0].data before writing # back out--this is the case that failed in # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 hdul = fits.open(self.data("fixed-1890.fits")) hdul.writeto(self.temp("test_new.fits"), overwrite=True) hdul.close() hdul = fits.open(self.temp("test_new.fits")) assert (hdul[0].data == orig_data).all() hdul.close() # Test opening/closing/reopening a scaled file in update mode hdul = fits.open(self.data("fixed-1890.fits"), do_not_scale_image_data=True) hdul.writeto( self.temp("test_new.fits"), overwrite=True, output_verify="silentfix" ) hdul.close() hdul = fits.open(self.temp("test_new.fits")) orig_data = hdul[0].data hdul.close() hdul = fits.open(self.temp("test_new.fits"), mode="update") hdul.close() hdul = fits.open(self.temp("test_new.fits")) assert (hdul[0].data == orig_data).all() hdul.close() def test_image_update_header(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105 Replacing the original header to an image HDU and saving should update the NAXISn keywords appropriately and save the image data correctly. """ # Copy the original file before saving to it self.copy_file("test0.fits") with fits.open(self.temp("test0.fits"), mode="update") as hdul: orig_data = hdul[1].data.copy() hdr_copy = hdul[1].header.copy() del hdr_copy["NAXIS*"] hdul[1].header = hdr_copy with fits.open(self.temp("test0.fits")) as hdul: assert (orig_data == hdul[1].data).all() # The test below raised a `ResourceWarning: unclosed transport` exception # due to a bug in Python <=3.10 (cf. cpython#90476) @pytest.mark.filterwarnings("ignore:unclosed transport <asyncio.sslproto") def test_open_scaled_in_update_mode(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119 (Don't update scaled image data if the data is not read) This ensures that merely opening and closing a file containing scaled image data does not cause any change to the data (or the header). Changes should only occur if the data is accessed. """ # Copy the original file before making any possible changes to it self.copy_file("scale.fits") mtime = os.stat(self.temp("scale.fits")).st_mtime time.sleep(1) fits.open(self.temp("scale.fits"), mode="update").close() # Ensure that no changes were made to the file merely by immediately # opening and closing it. 
assert mtime == os.stat(self.temp("scale.fits")).st_mtime # Insert a slight delay to ensure the mtime does change when the file # is changed time.sleep(1) hdul = fits.open(self.temp("scale.fits"), "update") orig_data = hdul[0].data hdul.close() # Now the file should be updated with the rescaled data assert mtime != os.stat(self.temp("scale.fits")).st_mtime hdul = fits.open(self.temp("scale.fits"), mode="update") assert hdul[0].data.dtype == np.dtype(">f4") assert hdul[0].header["BITPIX"] == -32 assert "BZERO" not in hdul[0].header assert "BSCALE" not in hdul[0].header assert (orig_data == hdul[0].data).all() # Try reshaping the data, then closing and reopening the file; let's # see if all the changes are preserved properly hdul[0].data.shape = (42, 10) hdul.close() hdul = fits.open(self.temp("scale.fits")) assert hdul[0].shape == (42, 10) assert hdul[0].data.dtype == np.dtype(">f4") assert hdul[0].header["BITPIX"] == -32 assert "BZERO" not in hdul[0].header assert "BSCALE" not in hdul[0].header hdul.close() def test_scale_back(self): """A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120 The scale_back feature for image HDUs. """ self.copy_file("scale.fits") with fits.open(self.temp("scale.fits"), mode="update", scale_back=True) as hdul: orig_bitpix = hdul[0].header["BITPIX"] orig_bzero = hdul[0].header["BZERO"] orig_bscale = hdul[0].header["BSCALE"] orig_data = hdul[0].data.copy() hdul[0].data[0] = 0 with fits.open(self.temp("scale.fits"), do_not_scale_image_data=True) as hdul: assert hdul[0].header["BITPIX"] == orig_bitpix assert hdul[0].header["BZERO"] == orig_bzero assert hdul[0].header["BSCALE"] == orig_bscale zero_point = int(math.floor(-orig_bzero / orig_bscale)) assert (hdul[0].data[0] == zero_point).all() with fits.open(self.temp("scale.fits")) as hdul: assert (hdul[0].data[1:] == orig_data[1:]).all() def test_image_none(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/27 """ with fits.open(self.data("test0.fits")) as h: h[1].data h[1].data = None h[1].writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as h: assert h[1].data is None assert h[1].header["NAXIS"] == 0 assert "NAXIS1" not in h[1].header assert "NAXIS2" not in h[1].header def test_invalid_blank(self): """ Regression test for https://github.com/astropy/astropy/issues/2711 If the BLANK keyword contains an invalid value it should be ignored for any calculations (though a warning should be issued). 
""" data = np.arange(100, dtype=np.float64) hdu = fits.PrimaryHDU(data) hdu.header["BLANK"] = "nan" with pytest.warns( fits.verify.VerifyWarning, match=r"Invalid value for " r"'BLANK' keyword in header: 'nan'", ): hdu.writeto(self.temp("test.fits")) with pytest.warns(AstropyUserWarning) as w: with fits.open(self.temp("test.fits")) as hdul: assert np.all(hdul[0].data == data) assert len(w) == 2 msg = "Invalid value for 'BLANK' keyword in header" assert msg in str(w[0].message) msg = "Invalid 'BLANK' keyword" assert msg in str(w[1].message) def test_scaled_image_fromfile(self): """ Regression test for https://github.com/astropy/astropy/issues/2710 """ # Make some sample data a = np.arange(100, dtype=np.float32) hdu = fits.PrimaryHDU(data=a.copy()) hdu.scale(bscale=1.1) hdu.writeto(self.temp("test.fits")) with open(self.temp("test.fits"), "rb") as f: file_data = f.read() hdul = fits.HDUList.fromstring(file_data) assert np.allclose(hdul[0].data, a) def test_set_data(self): """ Test data assignment - issue #5087 """ im = fits.ImageHDU() ar = np.arange(12) im.data = ar def test_scale_bzero_with_int_data(self): """ Regression test for https://github.com/astropy/astropy/issues/4600 """ a = np.arange(100, 200, dtype=np.int16) hdu1 = fits.PrimaryHDU(data=a.copy()) hdu2 = fits.PrimaryHDU(data=a.copy()) # Previously the following line would throw a TypeError, # now it should be identical to the integer bzero case hdu1.scale("int16", bzero=99.0) hdu2.scale("int16", bzero=99) assert np.allclose(hdu1.data, hdu2.data) def test_scale_back_uint_assignment(self): """ Extend fix for #4600 to assignment to data Suggested by: https://github.com/astropy/astropy/pull/4602#issuecomment-208713748 """ a = np.arange(100, 200, dtype=np.uint16) fits.PrimaryHDU(a).writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits"), mode="update", scale_back=True) as ( hdu, ): hdu.data[:] = 0 assert np.allclose(hdu.data, 0) def test_hdu_creation_with_scalar(self): msg = r"data object array\(1\) should have at least one dimension" with pytest.raises(TypeError, match=msg): fits.ImageHDU(data=1) with pytest.raises(TypeError, match=msg): fits.PrimaryHDU(data=1) class TestCompressedImage(FitsTestCase): def test_empty(self): """ Regression test for https://github.com/astropy/astropy/issues/2595 """ hdu = fits.CompImageHDU() assert hdu.data is None hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits"), mode="update") as hdul: assert len(hdul) == 2 assert isinstance(hdul[1], fits.CompImageHDU) assert hdul[1].data is None # Now test replacing the empty data with an array and see what # happens hdul[1].data = np.arange(100, dtype=np.int32) with fits.open(self.temp("test.fits")) as hdul: assert len(hdul) == 2 assert isinstance(hdul[1], fits.CompImageHDU) assert np.all(hdul[1].data == np.arange(100, dtype=np.int32)) @pytest.mark.parametrize( ("data", "compression_type", "quantize_level"), [ (np.zeros((2, 10, 10), dtype=np.float32), "RICE_1", 16), (np.zeros((2, 10, 10), dtype=np.float32), "GZIP_1", -0.01), (np.zeros((2, 10, 10), dtype=np.float32), "GZIP_2", -0.01), (np.zeros((100, 100)) + 1, "HCOMPRESS_1", 16), (np.zeros((10, 10)), "PLIO_1", 16), ], ) @pytest.mark.parametrize("byte_order", ["<", ">"]) def test_comp_image(self, data, compression_type, quantize_level, byte_order): data = data.newbyteorder(byte_order) primary_hdu = fits.PrimaryHDU() ofd = fits.HDUList(primary_hdu) chdu = fits.CompImageHDU( data, name="SCI", compression_type=compression_type, quantize_level=quantize_level, ) ofd.append(chdu) 
ofd.writeto(self.temp("test_new.fits"), overwrite=True) ofd.close() with fits.open(self.temp("test_new.fits")) as fd: assert (fd[1].data == data).all() assert fd[1].header["NAXIS"] == chdu.header["NAXIS"] assert fd[1].header["NAXIS1"] == chdu.header["NAXIS1"] assert fd[1].header["NAXIS2"] == chdu.header["NAXIS2"] assert fd[1].header["BITPIX"] == chdu.header["BITPIX"] @pytest.mark.remote_data def test_comp_image_quantize_level(self): """ Regression test for https://github.com/astropy/astropy/issues/5969 Test that quantize_level is used. """ import pickle np.random.seed(42) # Basically what scipy.datasets.ascent() does. fname = download_file( "https://github.com/scipy/dataset-ascent/blob/main/ascent.dat?raw=true" ) with open(fname, "rb") as f: scipy_data = np.array(pickle.load(f)) data = scipy_data + np.random.randn(512, 512) * 10 fits.ImageHDU(data).writeto(self.temp("im1.fits")) fits.CompImageHDU( data, compression_type="RICE_1", quantize_method=1, quantize_level=-1, dither_seed=5, ).writeto(self.temp("im2.fits")) fits.CompImageHDU( data, compression_type="RICE_1", quantize_method=1, quantize_level=-100, dither_seed=5, ).writeto(self.temp("im3.fits")) im1 = fits.getdata(self.temp("im1.fits")) im2 = fits.getdata(self.temp("im2.fits")) im3 = fits.getdata(self.temp("im3.fits")) assert not np.array_equal(im2, im3) assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3) assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3) assert np.isclose(np.min(im1 - im3), -50, atol=1e-1) assert np.isclose(np.max(im1 - im3), 50, atol=1e-1) def test_comp_image_hcompression_1_invalid_data(self): """ Tests compression with the HCOMPRESS_1 algorithm with data that is not 2D and has a non-2D tile size. """ pytest.raises( ValueError, fits.CompImageHDU, np.zeros((2, 10, 10), dtype=np.float32), name="SCI", compression_type="HCOMPRESS_1", quantize_level=16, tile_size=[2, 10, 10], ) def test_comp_image_hcompress_image_stack(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171 Tests that data containing more than two dimensions can be compressed with HCOMPRESS_1 so long as the user-supplied tile size can be flattened to two dimensions. """ cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10) hdu = fits.CompImageHDU( data=cube, name="SCI", compression_type="HCOMPRESS_1", quantize_level=16, tile_size=[5, 5, 1], ) hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul: # HCOMPRESSed images are allowed to deviate from the original by # about 1/quantize_level of the RMS in each tile. assert np.abs(hdul["SCI"].data - cube).max() < 1.0 / 15.0 def test_subtractive_dither_seed(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/32 Ensure that when floating point data is compressed with the SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed is added to the header, and that the data can be correctly decompressed. 
""" array = np.arange(100.0).reshape(10, 10) csum = (array[0].view("uint8").sum() % 10000) + 1 hdu = fits.CompImageHDU( data=array, quantize_method=SUBTRACTIVE_DITHER_1, dither_seed=DITHER_SEED_CHECKSUM, ) hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul: assert isinstance(hdul[1], fits.CompImageHDU) assert "ZQUANTIZ" in hdul[1]._header assert hdul[1]._header["ZQUANTIZ"] == "SUBTRACTIVE_DITHER_1" assert "ZDITHER0" in hdul[1]._header assert hdul[1]._header["ZDITHER0"] == csum assert np.all(hdul[1].data == array) def test_disable_image_compression(self): with fits.open(self.data("comp.fits"), disable_image_compression=True) as hdul: # The compressed image HDU should show up as a BinTableHDU, but # *not* a CompImageHDU assert isinstance(hdul[1], fits.BinTableHDU) assert not isinstance(hdul[1], fits.CompImageHDU) with fits.open(self.data("comp.fits")) as hdul: assert isinstance(hdul[1], fits.CompImageHDU) def test_open_comp_image_in_update_mode(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167 Similar to test_open_scaled_in_update_mode(), but specifically for compressed images. """ # Copy the original file before making any possible changes to it self.copy_file("comp.fits") mtime = os.stat(self.temp("comp.fits")).st_mtime time.sleep(1) fits.open(self.temp("comp.fits"), mode="update").close() # Ensure that no changes were made to the file merely by immediately # opening and closing it. assert mtime == os.stat(self.temp("comp.fits")).st_mtime @pytest.mark.slow def test_open_scaled_in_update_mode_compressed(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2 Identical to test_open_scaled_in_update_mode() but with a compressed version of the scaled image. """ # Copy+compress the original file before making any possible changes to # it with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul: chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header) chdu.writeto(self.temp("scale.fits")) mtime = os.stat(self.temp("scale.fits")).st_mtime time.sleep(1) fits.open(self.temp("scale.fits"), mode="update").close() # Ensure that no changes were made to the file merely by immediately # opening and closing it. 
assert mtime == os.stat(self.temp("scale.fits")).st_mtime # Insert a slight delay to ensure the mtime does change when the file # is changed time.sleep(1) hdul = fits.open(self.temp("scale.fits"), "update") hdul[1].data hdul.close() # Now the file should be updated with the rescaled data assert mtime != os.stat(self.temp("scale.fits")).st_mtime hdul = fits.open(self.temp("scale.fits"), mode="update") assert hdul[1].data.dtype == np.dtype("float32") assert hdul[1].header["BITPIX"] == -32 assert "BZERO" not in hdul[1].header assert "BSCALE" not in hdul[1].header # Try reshaping the data, then closing and reopening the file; let's # see if all the changes are preserved properly hdul[1].data.shape = (42, 10) hdul.close() hdul = fits.open(self.temp("scale.fits")) assert hdul[1].shape == (42, 10) assert hdul[1].data.dtype == np.dtype("float32") assert hdul[1].header["BITPIX"] == -32 assert "BZERO" not in hdul[1].header assert "BSCALE" not in hdul[1].header hdul.close() def test_write_comp_hdu_direct_from_existing(self): with fits.open(self.data("comp.fits")) as hdul: hdul[1].writeto(self.temp("test.fits")) with fits.open(self.data("comp.fits")) as hdul1: with fits.open(self.temp("test.fits")) as hdul2: assert np.all(hdul1[1].data == hdul2[1].data) assert comparerecords( hdul1[1].compressed_data, hdul2[1].compressed_data ) def test_rewriting_large_scaled_image_compressed(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1 Identical to test_rewriting_large_scaled_image() but with a compressed image. """ with fits.open( self.data("fixed-1890.fits"), do_not_scale_image_data=True ) as hdul: chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header) chdu.writeto(self.temp("fixed-1890-z.fits")) hdul = fits.open(self.temp("fixed-1890-z.fits")) orig_data = hdul[1].data hdul.writeto(self.temp("test_new.fits"), overwrite=True) hdul.close() hdul = fits.open(self.temp("test_new.fits")) assert (hdul[1].data == orig_data).all() hdul.close() # Just as before, but this time don't touch hdul[0].data before writing # back out--this is the case that failed in # https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 hdul = fits.open(self.temp("fixed-1890-z.fits")) hdul.writeto(self.temp("test_new.fits"), overwrite=True) hdul.close() hdul = fits.open(self.temp("test_new.fits")) assert (hdul[1].data == orig_data).all() hdul.close() # Test opening/closing/reopening a scaled file in update mode hdul = fits.open(self.temp("fixed-1890-z.fits"), do_not_scale_image_data=True) hdul.writeto( self.temp("test_new.fits"), overwrite=True, output_verify="silentfix" ) hdul.close() hdul = fits.open(self.temp("test_new.fits")) orig_data = hdul[1].data hdul.close() hdul = fits.open(self.temp("test_new.fits"), mode="update") hdul.close() hdul = fits.open(self.temp("test_new.fits")) assert (hdul[1].data == orig_data).all() hdul.close() def test_scale_back_compressed(self): """ Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3 Identical to test_scale_back() but uses a compressed image. 
""" # Create a compressed version of the scaled image with fits.open(self.data("scale.fits"), do_not_scale_image_data=True) as hdul: chdu = fits.CompImageHDU(data=hdul[0].data, header=hdul[0].header) chdu.writeto(self.temp("scale.fits")) with fits.open(self.temp("scale.fits"), mode="update", scale_back=True) as hdul: orig_bitpix = hdul[1].header["BITPIX"] orig_bzero = hdul[1].header["BZERO"] orig_bscale = hdul[1].header["BSCALE"] orig_data = hdul[1].data.copy() hdul[1].data[0] = 0 with fits.open(self.temp("scale.fits"), do_not_scale_image_data=True) as hdul: assert hdul[1].header["BITPIX"] == orig_bitpix assert hdul[1].header["BZERO"] == orig_bzero assert hdul[1].header["BSCALE"] == orig_bscale zero_point = int(math.floor(-orig_bzero / orig_bscale)) assert (hdul[1].data[0] == zero_point).all() with fits.open(self.temp("scale.fits")) as hdul: assert (hdul[1].data[1:] == orig_data[1:]).all() # Extra test to ensure that after everything the data is still the # same as in the original uncompressed version of the image with fits.open(self.data("scale.fits")) as hdul2: # Recall we made the same modification to the data in hdul # above hdul2[0].data[0] = 0 assert (hdul[1].data == hdul2[0].data).all() def test_lossless_gzip_compression(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198""" rng = np.random.default_rng(42) noise = rng.normal(size=(20, 20)) chdu1 = fits.CompImageHDU(data=noise, compression_type="GZIP_1") # First make a test image with lossy compression and make sure it # wasn't compressed perfectly. This shouldn't happen ever, but just to # make sure the test non-trivial. chdu1.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as h: assert np.abs(noise - h[1].data).max() > 0.0 del h chdu2 = fits.CompImageHDU( data=noise, compression_type="GZIP_1", quantize_level=0.0 ) # No quantization chdu2.writeto(self.temp("test.fits"), overwrite=True) with fits.open(self.temp("test.fits")) as h: assert (noise == h[1].data).all() def test_compression_column_tforms(self): """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199""" # Some interestingly tiled data so that some of it is quantized and # some of it ends up just getting gzip-compressed data2 = (np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] + np.arange( 1, 7 ) np.random.seed(1337) data1 = np.random.uniform(size=(6 * 4, 7 * 4)) data1[: data2.shape[0], : data2.shape[1]] = data2 chdu = fits.CompImageHDU(data1, compression_type="RICE_1", tile_size=(6, 7)) chdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits"), disable_image_compression=True) as h: assert re.match(r"^1PB\(\d+\)$", h[1].header["TFORM1"]) assert re.match(r"^1PB\(\d+\)$", h[1].header["TFORM2"]) def test_compression_update_header(self): """Regression test for https://github.com/spacetelescope/PyFITS/issues/23 """ self.copy_file("comp.fits") with fits.open(self.temp("comp.fits"), mode="update") as hdul: assert isinstance(hdul[1], fits.CompImageHDU) hdul[1].header["test1"] = "test" hdul[1]._header["test2"] = "test2" with fits.open(self.temp("comp.fits")) as hdul: assert "test1" in hdul[1].header assert hdul[1].header["test1"] == "test" assert "test2" in hdul[1].header assert hdul[1].header["test2"] == "test2" # Test update via index now: with fits.open(self.temp("comp.fits"), mode="update") as hdul: hdr = hdul[1].header hdr[hdr.index("TEST1")] = "foo" with fits.open(self.temp("comp.fits")) as hdul: assert hdul[1].header["TEST1"] == "foo" # Test slice updates with 
fits.open(self.temp("comp.fits"), mode="update") as hdul: hdul[1].header["TEST*"] = "qux" with fits.open(self.temp("comp.fits")) as hdul: assert list(hdul[1].header["TEST*"].values()) == ["qux", "qux"] with fits.open(self.temp("comp.fits"), mode="update") as hdul: hdr = hdul[1].header idx = hdr.index("TEST1") hdr[idx : idx + 2] = "bar" with fits.open(self.temp("comp.fits")) as hdul: assert list(hdul[1].header["TEST*"].values()) == ["bar", "bar"] # Test updating a specific COMMENT card duplicate with fits.open(self.temp("comp.fits"), mode="update") as hdul: hdul[1].header[("COMMENT", 1)] = "I am fire. I am death!" with fits.open(self.temp("comp.fits")) as hdul: assert hdul[1].header["COMMENT"][1] == "I am fire. I am death!" assert hdul[1]._header["COMMENT"][1] == "I am fire. I am death!" # Test deleting by keyword and by slice with fits.open(self.temp("comp.fits"), mode="update") as hdul: hdr = hdul[1].header del hdr["COMMENT"] idx = hdr.index("TEST1") del hdr[idx : idx + 2] with fits.open(self.temp("comp.fits")) as hdul: assert "COMMENT" not in hdul[1].header assert "COMMENT" not in hdul[1]._header assert "TEST1" not in hdul[1].header assert "TEST1" not in hdul[1]._header assert "TEST2" not in hdul[1].header assert "TEST2" not in hdul[1]._header def test_compression_update_header_with_reserved(self): """ Ensure that setting reserved keywords related to the table data structure on CompImageHDU image headers fails. """ def test_set_keyword(hdr, keyword, value): with pytest.warns(UserWarning) as w: hdr[keyword] = value assert len(w) == 1 assert str(w[0].message).startswith(f"Keyword {keyword!r} is reserved") assert keyword not in hdr with fits.open(self.data("comp.fits")) as hdul: hdr = hdul[1].header test_set_keyword(hdr, "TFIELDS", 8) test_set_keyword(hdr, "TTYPE1", "Foo") test_set_keyword(hdr, "ZCMPTYPE", "ASDF") test_set_keyword(hdr, "ZVAL1", "Foo") def test_compression_header_append(self): with fits.open(self.data("comp.fits")) as hdul: imghdr = hdul[1].header tblhdr = hdul[1]._header with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w: imghdr.append("TFIELDS") assert len(w) == 1 assert "TFIELDS" not in imghdr imghdr.append(("FOO", "bar", "qux"), end=True) assert "FOO" in imghdr assert imghdr[-1] == "bar" assert "FOO" in tblhdr assert tblhdr[-1] == "bar" imghdr.append(("CHECKSUM", "abcd1234")) assert "CHECKSUM" in imghdr assert imghdr["CHECKSUM"] == "abcd1234" assert "CHECKSUM" not in tblhdr assert "ZHECKSUM" in tblhdr assert tblhdr["ZHECKSUM"] == "abcd1234" def test_compression_header_append2(self): """ Regression test for issue https://github.com/astropy/astropy/issues/5827 """ with fits.open(self.data("comp.fits")) as hdul: header = hdul[1].header while len(header) < 1000: header.append() # pad with grow room # Append stats to header: header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean")) header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev")) header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median")) def test_compression_header_insert(self): with fits.open(self.data("comp.fits")) as hdul: imghdr = hdul[1].header tblhdr = hdul[1]._header # First try inserting a restricted keyword with pytest.warns(UserWarning, match="Keyword 'TFIELDS' is reserved") as w: imghdr.insert(1000, "TFIELDS") assert len(w) == 1 assert "TFIELDS" not in imghdr assert tblhdr.count("TFIELDS") == 1 # First try keyword-relative insert imghdr.insert("TELESCOP", ("OBSERVER", "Phil Plait")) assert "OBSERVER" in imghdr assert imghdr.index("OBSERVER") == 
imghdr.index("TELESCOP") - 1 assert "OBSERVER" in tblhdr assert tblhdr.index("OBSERVER") == tblhdr.index("TELESCOP") - 1 # Next let's see if an index-relative insert winds up being # sensible idx = imghdr.index("OBSERVER") imghdr.insert("OBSERVER", ("FOO",)) assert "FOO" in imghdr assert imghdr.index("FOO") == idx assert "FOO" in tblhdr assert tblhdr.index("FOO") == tblhdr.index("OBSERVER") - 1 def test_compression_header_set_before_after(self): with fits.open(self.data("comp.fits")) as hdul: imghdr = hdul[1].header tblhdr = hdul[1]._header with pytest.warns(UserWarning, match="Keyword 'ZBITPIX' is reserved ") as w: imghdr.set("ZBITPIX", 77, "asdf", after="XTENSION") assert len(w) == 1 assert "ZBITPIX" not in imghdr assert tblhdr.count("ZBITPIX") == 1 assert tblhdr["ZBITPIX"] != 77 # Move GCOUNT before PCOUNT (not that there's any reason you'd # *want* to do that, but it's just a test...) imghdr.set("GCOUNT", 99, before="PCOUNT") assert imghdr.index("GCOUNT") == imghdr.index("PCOUNT") - 1 assert imghdr["GCOUNT"] == 99 assert tblhdr.index("ZGCOUNT") == tblhdr.index("ZPCOUNT") - 1 assert tblhdr["ZGCOUNT"] == 99 assert tblhdr.index("PCOUNT") == 5 assert tblhdr.index("GCOUNT") == 6 assert tblhdr["GCOUNT"] == 1 imghdr.set("GCOUNT", 2, after="PCOUNT") assert imghdr.index("GCOUNT") == imghdr.index("PCOUNT") + 1 assert imghdr["GCOUNT"] == 2 assert tblhdr.index("ZGCOUNT") == tblhdr.index("ZPCOUNT") + 1 assert tblhdr["ZGCOUNT"] == 2 assert tblhdr.index("PCOUNT") == 5 assert tblhdr.index("GCOUNT") == 6 assert tblhdr["GCOUNT"] == 1 def test_compression_header_append_commentary(self): """ Regression test for https://github.com/astropy/astropy/issues/2363 """ hdu = fits.CompImageHDU(np.array([0], dtype=np.int32)) hdu.header["COMMENT"] = "hello world" assert hdu.header["COMMENT"] == ["hello world"] hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul: assert hdul[1].header["COMMENT"] == ["hello world"] def test_compression_with_gzip_column(self): """ Regression test for https://github.com/spacetelescope/PyFITS/issues/71 """ arr = np.zeros((2, 7000), dtype="float32") # The first row (which will be the first compressed tile) has a very # wide range of values that will be difficult to quantize, and should # result in use of a GZIP_COMPRESSED_DATA column arr[0] = np.linspace(0, 1, 7000) arr[1] = np.random.normal(size=7000) hdu = fits.CompImageHDU(data=arr) hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul: comp_hdu = hdul[1] # GZIP-compressed tile should compare exactly assert np.all(comp_hdu.data[0] == arr[0]) # The second tile uses lossy compression and may be somewhat off, # so we don't bother comparing it exactly def test_duplicate_compression_header_keywords(self): """ Regression test for https://github.com/astropy/astropy/issues/2750 Tests that the fake header (for the compressed image) can still be read even if the real header contained a duplicate ZTENSION keyword (the issue applies to any keyword specific to the compression convention, however). 
""" arr = np.arange(100, dtype=np.int32) hdu = fits.CompImageHDU(data=arr) header = hdu._header # append the duplicate keyword hdu._header.append(("ZTENSION", "IMAGE")) hdu.writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits")) as hdul: assert header == hdul[1]._header # There's no good reason to have a duplicate keyword, but # technically it isn't invalid either :/ assert hdul[1]._header.count("ZTENSION") == 2 def test_scale_bzero_with_compressed_int_data(self): """ Regression test for https://github.com/astropy/astropy/issues/4600 and https://github.com/astropy/astropy/issues/4588 Identical to test_scale_bzero_with_int_data() but uses a compressed image. """ a = np.arange(100, 200, dtype=np.int16) hdu1 = fits.CompImageHDU(data=a.copy()) hdu2 = fits.CompImageHDU(data=a.copy()) # Previously the following line would throw a TypeError, # now it should be identical to the integer bzero case hdu1.scale("int16", bzero=99.0) hdu2.scale("int16", bzero=99) assert np.allclose(hdu1.data, hdu2.data) def test_scale_back_compressed_uint_assignment(self): """ Extend fix for #4600 to assignment to data Identical to test_scale_back_uint_assignment() but uses a compressed image. Suggested by: https://github.com/astropy/astropy/pull/4602#issuecomment-208713748 """ a = np.arange(100, 200, dtype=np.uint16) fits.CompImageHDU(a).writeto(self.temp("test.fits")) with fits.open(self.temp("test.fits"), mode="update", scale_back=True) as hdul: hdul[1].data[:] = 0 assert np.allclose(hdul[1].data, 0) def test_compressed_header_missing_znaxis(self): a = np.arange(100, 200, dtype=np.uint16) comp_hdu = fits.CompImageHDU(a) comp_hdu._header.pop("ZNAXIS") with pytest.raises(KeyError): comp_hdu.compressed_data comp_hdu = fits.CompImageHDU(a) comp_hdu._header.pop("ZBITPIX") with pytest.raises(KeyError): comp_hdu.compressed_data def test_compressed_header_double_extname(self): """Test that a double EXTNAME with one default value does not mask the non-default value.""" with fits.open(self.data("double_ext.fits")) as hdul: hdu = hdul[1] # Raw header has 2 EXTNAME entries indices = hdu._header._keyword_indices["EXTNAME"] assert len(indices) == 2 # The non-default name should be returned. assert hdu.name == "ccd00" assert "EXTNAME" in hdu.header assert hdu.name == hdu.header["EXTNAME"] # There should be 1 non-default EXTNAME entries. indices = hdu.header._keyword_indices["EXTNAME"] assert len(indices) == 1 # Test header sync from property set. new_name = "NEW_NAME" hdu.name = new_name assert hdu.name == new_name assert hdu.header["EXTNAME"] == new_name assert hdu._header["EXTNAME"] == new_name assert hdu._image_header["EXTNAME"] == new_name # Check that setting the header will change the name property. hdu.header["EXTNAME"] = "NEW2" assert hdu.name == "NEW2" hdul.writeto(self.temp("tmp.fits"), overwrite=True) with fits.open(self.temp("tmp.fits")) as hdul1: hdu1 = hdul1[1] assert len(hdu1._header._keyword_indices["EXTNAME"]) == 1 assert hdu1.name == "NEW2" # Check that deleting EXTNAME will and setting the name will # work properly. 
del hdu.header["EXTNAME"] hdu.name = "RE-ADDED" assert hdu.name == "RE-ADDED" with pytest.raises(TypeError): hdu.name = 42 def test_compressed_header_extname(self): """Test consistent EXTNAME / hdu name interaction.""" name = "FOO" hdu = fits.CompImageHDU(data=np.arange(10), name=name) assert hdu._header["EXTNAME"] == name assert hdu.header["EXTNAME"] == name assert hdu.name == name name = "BAR" hdu.name = name assert hdu._header["EXTNAME"] == name assert hdu.header["EXTNAME"] == name assert hdu.name == name assert len(hdu._header._keyword_indices["EXTNAME"]) == 1 def test_compressed_header_minimal(self): """ Regression test for https://github.com/astropy/astropy/issues/11694 Tests that CompImageHDU can be initialized with a Header that contains few or no cards, and doesn't require specific cards such as 'BITPIX' or 'NAXIS'. """ fits.CompImageHDU(data=np.arange(10), header=fits.Header()) header = fits.Header({"HELLO": "world"}) hdu = fits.CompImageHDU(data=np.arange(10), header=header) assert hdu.header["HELLO"] == "world" @pytest.mark.parametrize( ("keyword", "dtype", "expected"), [ ("BSCALE", np.uint8, np.float32), ("BSCALE", np.int16, np.float32), ("BSCALE", np.int32, np.float64), ("BZERO", np.uint8, np.float32), ("BZERO", np.int16, np.float32), ("BZERO", np.int32, np.float64), ], ) def test_compressed_scaled_float(self, keyword, dtype, expected): """ If BSCALE,BZERO is set to floating point values, the image should be floating-point. https://github.com/astropy/astropy/pull/6492 Parameters ---------- keyword : `str` Keyword to set to a floating-point value to trigger floating-point pixels. dtype : `numpy.dtype` Type of original array. expected : `numpy.dtype` Expected type of uncompressed array. """ value = 1.23345 # A floating-point value hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype)) hdu.header[keyword] = value hdu.writeto(self.temp("test.fits")) del hdu with fits.open(self.temp("test.fits")) as hdu: assert hdu[1].header[keyword] == value assert hdu[1].data.dtype == expected @pytest.mark.parametrize( "dtype", (np.uint8, np.int16, np.uint16, np.int32, np.uint32) ) def test_compressed_integers(self, dtype): """Test that the various integer dtypes are correctly written and read. 
Regression test for https://github.com/astropy/astropy/issues/9072 """ mid = np.iinfo(dtype).max // 2 data = np.arange(mid - 50, mid + 50, dtype=dtype) testfile = self.temp("test.fits") hdu = fits.CompImageHDU(data=data) hdu.writeto(testfile, overwrite=True) new = fits.getdata(testfile) np.testing.assert_array_equal(data, new) def test_write_non_contiguous_data(self): """ Regression test for https://github.com/astropy/astropy/issues/2150 """ orig = np.arange(100, dtype=float).reshape((10, 10), order="f") assert not orig.flags.contiguous primary = fits.PrimaryHDU() hdu = fits.CompImageHDU(orig) hdulist = fits.HDUList([primary, hdu]) hdulist.writeto(self.temp("test.fits")) actual = fits.getdata(self.temp("test.fits")) assert_equal(orig, actual) def test_slice_and_write_comp_hdu(self): """ Regression test for https://github.com/astropy/astropy/issues/9955 """ with fits.open(self.data("comp.fits")) as hdul: hdul[1].data = hdul[1].data[:200, :100] assert not hdul[1].data.flags.contiguous hdul[1].writeto(self.temp("test.fits")) with fits.open(self.data("comp.fits")) as hdul1: with fits.open(self.temp("test.fits")) as hdul2: assert_equal(hdul1[1].data[:200, :100], hdul2[1].data) def test_comphdu_bscale(tmp_path): """ Regression test for a bug that caused extensions that used BZERO and BSCALE that got turned into CompImageHDU to end up with BZERO/BSCALE before the TFIELDS. """ filename1 = tmp_path / "3hdus.fits" filename2 = tmp_path / "3hdus_comp.fits" x = np.random.random((100, 100)) * 100 x0 = fits.PrimaryHDU() x1 = fits.ImageHDU(np.array(x - 50, dtype=int), uint=True) x1.header["BZERO"] = 20331 x1.header["BSCALE"] = 2.3 hdus = fits.HDUList([x0, x1]) hdus.writeto(filename1) # fitsverify (based on cfitsio) should fail on this file, only seeing the # first HDU. with fits.open(filename1) as hdus: hdus[1] = fits.CompImageHDU( data=hdus[1].data.astype(np.uint32), header=hdus[1].header ) hdus.writeto(filename2) # open again and verify with fits.open(filename2) as hdus: hdus[1].verify("exception") def test_scale_implicit_casting(): # Regression test for an issue that occurred because Numpy now does not # allow implicit type casting during inplace operations. hdu = fits.ImageHDU(np.array([1], dtype=np.int32)) hdu.scale(bzero=1.3) def test_bzero_implicit_casting_compressed(): # Regression test for an issue that occurred because Numpy now does not # allow implicit type casting during inplace operations. Astropy is # actually not able to produce a file that triggers the failure - the # issue occurs when using unsigned integer types in the FITS file, in which # case BZERO should be 32768. But if the keyword is stored as 32768.0, then # it was possible to trigger the implicit casting error. filename = get_pkg_data_filename("data/compressed_float_bzero.fits") with fits.open(filename) as hdul: hdu = hdul[1] hdu.data def test_bzero_mishandled_info(tmp_path): # Regression test for #5507: # Calling HDUList.info() on a dataset which applies a zeropoint # from BZERO but which astropy.io.fits does not think it needs # to resize to a new dtype results in an AttributeError. 
filename = tmp_path / "floatimg_with_bzero.fits" hdu = fits.ImageHDU(np.zeros((10, 10))) hdu.header["BZERO"] = 10 hdu.writeto(filename, overwrite=True) with fits.open(filename) as hdul: hdul.info() def test_image_write_readonly(tmp_path): # Regression test to make sure that we can write out read-only arrays (#5512) x = np.array([1, 2, 3]) x.setflags(write=False) ghdu = fits.ImageHDU(data=x) ghdu.add_datasum() filename = tmp_path / "test.fits" ghdu.writeto(filename) with fits.open(filename) as hdulist: assert_equal(hdulist[1].data, [1, 2, 3]) # Same for compressed HDU x = np.array([1.0, 2.0, 3.0]) x.setflags(write=False) ghdu = fits.CompImageHDU(data=x) # add_datasum does not work for CompImageHDU # ghdu.add_datasum() filename = tmp_path / "test2.fits" ghdu.writeto(filename) with fits.open(filename) as hdulist: assert_equal(hdulist[1].data, [1.0, 2.0, 3.0]) def test_int8(tmp_path): """Test for int8 support, https://github.com/astropy/astropy/issues/11995""" img = np.arange(-50, 50, dtype=np.int8).reshape(10, 10) hdu = fits.PrimaryHDU(img) hdu.writeto(tmp_path / "int8.fits") with fits.open(tmp_path / "int8.fits") as hdul: assert hdul[0].header["BITPIX"] == 8 assert hdul[0].header["BZERO"] == -128 assert hdul[0].header["BSCALE"] == 1.0 assert_equal(hdul[0].data, img) assert hdul[0].data.dtype == img.dtype
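

# Illustrative sketch (not part of the astropy test suite): the scaling
# convention that test_int8 above asserts. FITS has no signed 8-bit BITPIX, so
# int8 data is written as unsigned bytes with BSCALE = 1 and BZERO = -128, and
# a reader recovers physical = BSCALE * stored + BZERO. The array values below
# are arbitrary and chosen only for illustration.
def _sketch_int8_bzero_roundtrip():
    physical = np.arange(-50, 50, dtype=np.int8)
    # What ends up on disk: values shifted into the unsigned 0..255 range
    stored = (physical.astype(np.int16) + 128).astype(np.uint8)
    # What a reader computes from the BSCALE/BZERO header keywords
    recovered = 1.0 * stored + (-128)
    assert np.array_equal(recovered.astype(np.int8), physical)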
""" This test file uses the https://github.com/esheldon/fitsio package to verify our compression and decompression routines against the implementation in cfitsio. *Note*: The fitsio library is GPL licensed, therefore it could be interpreted that so is this test file. Given that this test file isn't imported anywhere else in the code this shouldn't cause us any issues. Please bear this in mind when editing this file. """ import os import numpy as np import pytest from astropy.io import fits from .conftest import _expand, fitsio_param_to_astropy_param # This is so that tox can force this file to be run, and not be silently # skipped on CI, but in all other test runs it's skipped if fitsio isn't present. if "ASTROPY_ALWAYS_TEST_FITSIO" in os.environ: import fitsio else: fitsio = pytest.importorskip("fitsio") @pytest.fixture( scope="module", params=_expand( [((10,),), ((5,), (1,), (3,))], [((12, 12),), ((1, 12), (4, 5), (6, 6), None)], [((15, 15),), ((1, 15), (5, 1), (5, 5))], [ ((15, 15, 15),), ((5, 5, 1), (5, 7, 1), (1, 5, 4), (1, 1, 15), (15, 1, 5)), ], # Test the situation where the tile shape is passed larger than the # array shape [ ( (4, 4, 5), (5, 5, 5), ), ( (5, 5, 1), None, ), ], # Test shapes which caused errors # This one we can't test here as it causes segfaults in cfitsio # It is tested in test_roundtrip_high_D though. # [ # ((3, 4, 5),), # ((1, 2, 3),), # ], # >3D Data are not currently supported by cfitsio ), ids=lambda x: f"shape: {x[0]} tile_dims: {x[1]}", ) def array_shapes_tile_dims(request, compression_type): shape, tile_dim = request.param # H_COMPRESS needs >=2D data and always 2D tiles if compression_type == "HCOMPRESS_1": if ( # We don't have at least a 2D image len(shape) < 2 or # We don't have 2D tiles np.count_nonzero(np.array(tile_dim) != 1) != 2 or # TODO: The following restrictions can be lifted with some extra work. # The tile is not the first two dimensions of the data tile_dim[0] == 1 or tile_dim[1] == 1 or # The tile dimensions not an integer multiple of the array dims np.count_nonzero(np.array(shape[:2]) % tile_dim[:2]) != 0 ): pytest.xfail( "HCOMPRESS requires 2D tiles, from the first two" "dimensions, and an integer number of tiles along the first two" "axes." ) return shape, tile_dim @pytest.fixture(scope="module") def tile_dims(array_shapes_tile_dims): return array_shapes_tile_dims[1] @pytest.fixture(scope="module") def data_shape(array_shapes_tile_dims): return array_shapes_tile_dims[0] @pytest.fixture(scope="module") def base_original_data(data_shape, dtype, numpy_rng, compression_type): random = numpy_rng.uniform(high=255, size=data_shape) # Set first value to be exactly zero as zero values require special treatment # for SUBTRACTIVE_DITHER_2 random.ravel()[0] = 0.0 # There seems to be a bug with the fitsio library where HCOMPRESS doesn't # work with int16 random data, so use a bit for structured test data. 
    if (compression_type.startswith("HCOMPRESS") and "i2" in dtype) or "u1" in dtype:
        random = np.arange(np.prod(data_shape)).reshape(data_shape)
    return random.astype(dtype)


@pytest.fixture(scope="module")
def fitsio_compressed_file_path(
    tmp_path_factory,
    comp_param_dtype,
    base_original_data,
    data_shape,  # For debugging
    tile_dims,
):
    compression_type, param, dtype = comp_param_dtype
    if (
        base_original_data.ndim > 2
        and "u1" in dtype
        and compression_type == "HCOMPRESS_1"
    ):
        pytest.xfail("fitsio won't write these")

    if compression_type == "PLIO_1" and "f" in dtype:
        # fitsio fails with a compression error
        pytest.xfail("fitsio fails to write these")

    if (
        compression_type == "HCOMPRESS_1"
        and "f" in dtype
        and param.get("qmethod", None) == 2
    ):
        # fitsio writes these files with very large/incorrect zzero values, whereas
        # qmethod == 1 works (and the two methods should be identical except for the
        # treatment of zeros)
        pytest.xfail("fitsio writes these files with very large/incorrect zzero values")

    tmp_path = tmp_path_factory.mktemp("fitsio")

    original_data = base_original_data.astype(dtype)

    filename = tmp_path / f"{compression_type}_{dtype}.fits"
    fits = fitsio.FITS(filename, "rw")
    fits.write(original_data, compress=compression_type, tile_dims=tile_dims, **param)

    return filename


@pytest.fixture(scope="module")
def astropy_compressed_file_path(
    comp_param_dtype,
    tmp_path_factory,
    base_original_data,
    data_shape,  # For debugging
    tile_dims,
):
    compression_type, param, dtype = comp_param_dtype
    original_data = base_original_data.astype(dtype)

    tmp_path = tmp_path_factory.mktemp("astropy")
    filename = tmp_path / f"{compression_type}_{dtype}.fits"

    param = fitsio_param_to_astropy_param(param)
    hdu = fits.CompImageHDU(
        data=original_data,
        compression_type=compression_type,
        # TODO: why does this require a list??
        tile_size=list(tile_dims) if tile_dims is not None else tile_dims,
        **param,
    )
    hdu.writeto(filename)

    return filename


def test_decompress(
    fitsio_compressed_file_path,
    comp_param_dtype,
):
    compression_type, param, dtype = comp_param_dtype

    with fits.open(fitsio_compressed_file_path) as hdul:
        data = hdul[1].data
        assert hdul[1]._header["ZCMPTYPE"].replace("ONE", "1") == compression_type
        assert hdul[1].data.dtype.kind == np.dtype(dtype).kind
        assert hdul[1].data.dtype.itemsize == np.dtype(dtype).itemsize

    # The data might not always match the original data exactly in the case of
    # lossy compression, so instead of comparing the array read by astropy to
    # the original data, we compare it to the data read in by fitsio (as those
    # should match)
    fts = fitsio.FITS(fitsio_compressed_file_path)
    data2 = fts[1].read()
    np.testing.assert_allclose(data, data2)

    # The first value should be exactly equal to zero when using SUBTRACTIVE_DITHER_2
    if param.get("qmethod", None) == 2:
        assert data.ravel()[0] == 0.0


def test_compress(
    astropy_compressed_file_path,
    compression_type,
    dtype,
):
    fts = fitsio.FITS(astropy_compressed_file_path, "r")
    header = fts[1].read_header()
    data = fts[1].read()

    assert header["ZCMPTYPE"] == compression_type

    assert data.dtype.kind == np.dtype(dtype).kind
    assert data.dtype.itemsize == np.dtype(dtype).itemsize

    # The data might not always match the original data exactly in the case of
    # lossy compression, so instead of comparing the array read by fitsio to
    # the original data, we compare it to the data read in by astropy (as those
    # should match)
    with fits.open(astropy_compressed_file_path) as hdul:
        np.testing.assert_allclose(data, hdul[1].data)
import itertools import numpy as np import pytest COMPRESSION_TYPES = [ "GZIP_1", "GZIP_2", "RICE_1", "HCOMPRESS_1", "PLIO_1", ] def fitsio_param_to_astropy_param(param): # Convert fitsio kwargs to astropy kwargs _map = {"qlevel": "quantize_level", "qmethod": "quantize_method"} param = {_map[k]: v for k, v in param.items()} # Map quantize_level if param.get("quantize_level", "missing") is None: param["quantize_level"] = 0.0 return param def _expand(*params): """ Expands a list of N iterables of parameters into a flat list with all combinations of all parameters. """ expanded = [] for ele in params: expanded += list(itertools.product(*ele)) return expanded ALL_INTEGER_DTYPES = [ "".join(ele) for ele in _expand([("<", ">"), ("i",), ("2", "4")], [("<", ">"), ("u",), ("1",)]) ] ALL_FLOAT_DTYPES = ["".join(ele) for ele in _expand([("<", ">"), ("f",), ("4", "8")])] @pytest.fixture( scope="session", ids=lambda x: " ".join(map(str, x)), # The params here are compression type, parameters for the compression / # quantise and dtype params=_expand( # Test all compression types with default compression parameters for # all integers [ COMPRESSION_TYPES, ({},), ALL_INTEGER_DTYPES, ], # GZIP supports lossless non-quantized floating point data [ ("GZIP_1", "GZIP_2"), ({"qlevel": None},), ALL_FLOAT_DTYPES, ], # All compression types can also take quantized floating point input # Rather than running all quantization parameters for all algorithms # split up the algorithms to reduce the total number of tests. [ ["GZIP_1", "GZIP_2"], ({"qlevel": 5, "qmethod": -1},), ALL_FLOAT_DTYPES, ], [ ["RICE_1"], ({"qlevel": 10, "qmethod": 1},), ALL_FLOAT_DTYPES, ], [ ["HCOMPRESS_1"], ( {"qlevel": 20, "qmethod": 2}, {"qlevel": 10, "qmethod": 1}, ), ALL_FLOAT_DTYPES, ], # Note no PLIO here as that's intended for masks, i.e. data which can't # be generated with quantization. ), ) def comp_param_dtype(request): return request.param @pytest.fixture(scope="session") def compression_type(comp_param_dtype): return comp_param_dtype[0] @pytest.fixture(scope="session") def compression_param(comp_param_dtype): return comp_param_dtype[1] @pytest.fixture(scope="session") def dtype(comp_param_dtype): return comp_param_dtype[2] @pytest.fixture(scope="session") def numpy_rng(): return np.random.default_rng()
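

# Illustrative sketch (not used by any fixture above) of what the two helpers
# in this conftest produce; the literal values are assumptions chosen for
# demonstration only.
def _sketch_conftest_helpers():
    # _expand flattens the cartesian product of each parameter group.
    combos = _expand([("a", "b"), (1, 2)])
    assert combos == [("a", 1), ("a", 2), ("b", 1), ("b", 2)]

    # fitsio-style quantization kwargs are renamed to their astropy
    # equivalents, and qlevel=None (lossless) becomes quantize_level=0.0.
    assert fitsio_param_to_astropy_param({"qlevel": None}) == {"quantize_level": 0.0}
    assert fitsio_param_to_astropy_param({"qlevel": 5, "qmethod": -1}) == {
        "quantize_level": 5,
        "quantize_method": -1,
    }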
from pathlib import Path import numpy as np import pytest from numpy.testing import assert_allclose, assert_equal from astropy.io import fits from astropy.io.fits._tiled_compression.codecs import PLIO1 from .conftest import fitsio_param_to_astropy_param @pytest.fixture def canonical_data_base_path(): return Path(__file__).parent / "data" @pytest.fixture( params=(Path(__file__).parent / "data").glob("m13_*.fits"), ids=lambda x: x.name ) def canonical_int_hdus(request): """ This fixture provides 4 files downloaded from https://fits.gsfc.nasa.gov/registry/tilecompression.html Which are used as canonical tests of data not compressed by Astropy. """ with fits.open(request.param) as hdul: yield hdul[1] @pytest.fixture def original_int_hdu(canonical_data_base_path): with fits.open(canonical_data_base_path / "m13.fits") as hdul: yield hdul[0] # pytest-openfiles does not correctly check for open files when the files are # opened in a fixture, so we skip the check here. # https://github.com/astropy/pytest-openfiles/issues/32 @pytest.mark.openfiles_ignore def test_canonical_data(original_int_hdu, canonical_int_hdus): assert_allclose(original_int_hdu.data, canonical_int_hdus.data) def test_zblank_support(canonical_data_base_path, tmp_path): # This uses a test 12x12 image which contains a NaN value in the [1, 1] # pixel - it was compressed using fpack which automatically added a ZBLANK # header keyword reference = np.arange(144).reshape((12, 12)).astype(float) reference[1, 1] = np.nan with fits.open(canonical_data_base_path / "compressed_with_nan.fits") as hdul: assert_equal(np.round(hdul[1].data), reference) # Now generate a file ourselves and check that the output has the ZBLANK # keyword set automatically hdu = fits.CompImageHDU(data=reference, compression_type="RICE_1", tile_size=(6, 6)) hdu.writeto(tmp_path / "test_zblank.fits") with fits.open(tmp_path / "test_zblank.fits") as hdul: assert "ZBLANK" in hdul[1].header assert_equal(np.round(hdul[1].data), reference) @pytest.mark.parametrize( ("shape", "tile_dim"), ( ([10, 10], [5, 5]), # something for HCOMPRESS ([5, 5, 5], [5, 5, 5]), # ([5, 5, 5], [5, 5, 1]), # something for HCOMPRESS ([10, 15, 20], [5, 5, 5]), ([10, 5, 12], [5, 5, 5]), # TODO: There's a stupid bit of code in CompImageHDU which stops this working. # ([2, 3, 4, 5], [1, 1, 2, 3]), ([2, 3, 4, 5], [5, 5, 1, 1]), ), ) def test_roundtrip_high_D( numpy_rng, compression_type, compression_param, tmp_path, dtype, shape, tile_dim ): if compression_type == "HCOMPRESS_1" and ( # We don't have at least a 2D image len(shape) < 2 or # We don't have 2D tiles np.count_nonzero(np.array(tile_dim) != 1) != 2 or # TODO: The following restrictions can be lifted with some extra work. 
        # The tile is not the first two dimensions of the data
        tile_dim[0] == 1
        or tile_dim[1] == 1
        or
        # The tile dimensions are not an integer multiple of the array dims
        np.count_nonzero(np.array(shape[:2]) % tile_dim[:2]) != 0
    ):
        pytest.xfail("HCOMPRESS requires 2D tiles.")

    random = numpy_rng.uniform(high=255, size=shape)
    # Set first value to be exactly zero as zero values require special treatment
    # for SUBTRACTIVE_DITHER_2
    random.ravel()[0] = 0.0
    original_data = random.astype(dtype)

    dtype_sanitizer = {
        ">": "big",
        "<": "little",
        "=": "native",
    }
    filename = (
        tmp_path / f"{compression_type}_{dtype[1:]}_{dtype_sanitizer[dtype[0]]}.fits"
    )
    param = fitsio_param_to_astropy_param(compression_param)
    hdu = fits.CompImageHDU(
        data=original_data,
        compression_type=compression_type,
        tile_size=tile_dim,
        **param,
    )
    hdu.writeto(filename)

    atol = 0
    if compression_param.get("qmethod", None) is not None:
        # This is a horrific hack. We are comparing quantized data to
        # unquantized data here, so there can be pretty large differences.
        # What this test is really checking for is arrays which are
        # *completely* different, which would indicate the compression has
        # not worked.
        atol = 17

    with fits.open(filename) as hdul:
        np.testing.assert_allclose(original_data, hdul[1].data, atol=atol)


def test_plio_1_out_of_range():
    pc = PLIO1(tilesize=10)
    data = np.arange(-10, 0).astype(np.int32)
    with pytest.raises(ValueError):
        pc.encode(data)
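

# Illustrative sketch of the HCOMPRESS_1 tiling rule that the xfail logic in
# test_roundtrip_high_D encodes: tiles must be 2D, must span the first two
# axes of the data, and must divide those axes evenly. The default shapes are
# assumptions chosen only for demonstration, e.g.
# _sketch_hcompress_tile_ok((10, 15, 20), (5, 5, 1)) is truthy while
# _sketch_hcompress_tile_ok((10, 15, 20), (1, 5, 4)) is not.
def _sketch_hcompress_tile_ok(shape=(10, 15, 20), tile_dim=(5, 5, 1)):
    tile = np.array(tile_dim)
    return (
        len(shape) >= 2
        and np.count_nonzero(tile != 1) == 2
        and tile[0] != 1
        and tile[1] != 1
        and np.count_nonzero(np.array(shape[:2]) % tile[:2]) == 0
    )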
# Licensed under a 3-clause BSD style license - see LICENSE.rst import functools import io import os import re from contextlib import nullcontext from io import BytesIO from textwrap import dedent import numpy as np import pytest from numpy import ma from astropy.io import ascii from astropy.io.ascii.core import ( FastOptionsError, InconsistentTableError, ParameterError, ) from astropy.io.ascii.fastbasic import ( FastBasic, FastCommentedHeader, FastCsv, FastNoHeader, FastRdb, FastTab, ) from astropy.table import MaskedColumn, Table from astropy.utils.data import get_pkg_data_filename from astropy.utils.exceptions import AstropyWarning from .common import assert_almost_equal, assert_equal, assert_true StringIO = lambda x: BytesIO(x.encode("ascii")) CI = os.environ.get("CI", False) def assert_table_equal(t1, t2, check_meta=False, rtol=1.0e-15, atol=1.0e-300): """ Test equality of all columns in a table, with stricter tolerances for float columns than the np.allclose default. """ assert_equal(len(t1), len(t2)) assert_equal(t1.colnames, t2.colnames) if check_meta: assert_equal(t1.meta, t2.meta) for name in t1.colnames: if len(t1) != 0: assert_equal(t1[name].dtype.kind, t2[name].dtype.kind) if not isinstance(t1[name], MaskedColumn): for i, el in enumerate(t1[name]): try: if not isinstance(el, str) and np.isnan(el): assert_true( not isinstance(t2[name][i], str) and np.isnan(t2[name][i]) ) elif isinstance(el, str): assert_equal(el, t2[name][i]) else: assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol) except (TypeError, NotImplementedError): pass # ignore for now # Use this counter to create a unique filename for each file created in a test # if this function is called more than once in a single test _filename_counter = 0 def _read( tmp_path, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs, ): # make sure we have a newline so table can't be misinterpreted as a filename global _filename_counter table += "\n" reader = Reader(**kwargs) t1 = reader.read(table) t2 = reader.read(StringIO(table)) t3 = reader.read(table.splitlines()) t4 = ascii.read(table, format=format, guess=False, **kwargs) t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs) assert_table_equal(t1, t2, check_meta=check_meta) assert_table_equal(t2, t3, check_meta=check_meta) assert_table_equal(t3, t4, check_meta=check_meta) assert_table_equal(t4, t5, check_meta=check_meta) if parallel: if CI: pytest.xfail("Multiprocessing can sometimes fail on CI") t6 = ascii.read( table, format=format, guess=False, fast_reader={"parallel": True}, **kwargs ) assert_table_equal(t1, t6, check_meta=check_meta) filename = tmp_path / f"table{_filename_counter}.txt" _filename_counter += 1 with open(filename, "wb") as f: f.write(table.encode("ascii")) f.flush() t7 = ascii.read(filename, format=format, guess=False, **kwargs) if parallel: t8 = ascii.read( filename, format=format, guess=False, fast_reader={"parallel": True}, **kwargs, ) assert_table_equal(t1, t7, check_meta=check_meta) if parallel: assert_table_equal(t1, t8, check_meta=check_meta) return t1 @pytest.fixture(scope="function") def read_basic(tmp_path, request): return functools.partial(_read, tmp_path, Reader=FastBasic, format="basic") @pytest.fixture(scope="function") def read_csv(tmp_path, request): return functools.partial(_read, tmp_path, Reader=FastCsv, format="csv") @pytest.fixture(scope="function") def read_tab(tmp_path, request): return functools.partial(_read, tmp_path, Reader=FastTab, format="tab") 
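

# Illustrative sketch (not a fixture): the _read helper above pushes the same
# table text through the Reader classes and through ascii.read with the fast
# reader both enabled and disabled; a minimal equivalent comparison, on an
# assumed inline table, looks like this.
def _sketch_fast_vs_pure_python_reader():
    text = "a b c\n1 2 3\n4 5 6"
    fast = ascii.read(text, format="basic", guess=False, fast_reader=True)
    slow = ascii.read(text, format="basic", guess=False, fast_reader=False)
    assert fast.colnames == slow.colnames == ["a", "b", "c"]
    assert all((fast[name] == slow[name]).all() for name in fast.colnames)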
@pytest.fixture(scope="function") def read_commented_header(tmp_path, request): return functools.partial( _read, tmp_path, Reader=FastCommentedHeader, format="commented_header" ) @pytest.fixture(scope="function") def read_rdb(tmp_path, request): return functools.partial(_read, tmp_path, Reader=FastRdb, format="rdb") @pytest.fixture(scope="function") def read_no_header(tmp_path, request): return functools.partial(_read, tmp_path, Reader=FastNoHeader, format="no_header") @pytest.mark.parametrize("delimiter", [",", "\t", " ", "csv"]) @pytest.mark.parametrize("quotechar", ['"', "'"]) @pytest.mark.parametrize("fast", [False, True]) def test_embedded_newlines(delimiter, quotechar, fast): """Test that embedded newlines are supported for io.ascii readers and writers, both fast and Python readers.""" # Start with an assortment of values with different embedded newlines and whitespace dat = [ ["\t a ", " b \n cd ", "\n"], [" 1\n ", '2 \n" \t 3\n4\n5', "1\n '2\n"], [" x,y \nz\t", "\t 12\n\t34\t ", "56\t\n"], ] dat = Table(dat, names=("a", "b", "c")) # Construct a table which is our expected result of writing the table and # reading it back. Certain stripping of whitespace is expected. exp = {} # expected output from reading for col in dat.itercols(): vals = [] for val in col: # Readers and writers both strip whitespace from ends of values val = val.strip(" \t") if not fast: # Pure Python reader has a "feature" where it strips trailing # whitespace from each input line. This means a value like # " x \ny \t\n" gets read as "x\ny". bits = val.splitlines(keepends=True) bits_out = [] for bit in bits: bit = re.sub(r"[ \t]+(\n?)$", r"\1", bit.strip(" \t")) bits_out.append(bit) val = "".join(bits_out) vals.append(val) exp[col.info.name] = vals exp = Table(exp) if delimiter == "csv": format = "csv" delimiter = "," else: format = "basic" # Write the table to `text` fh = io.StringIO() ascii.write( dat, fh, format=format, delimiter=delimiter, quotechar=quotechar, fast_writer=fast, ) text = fh.getvalue() # Read it back and compare to the expected dat_out = ascii.read( text, format=format, guess=False, delimiter=delimiter, quotechar=quotechar, fast_reader=fast, ) eq = dat_out.values_equal(exp) assert all(np.all(col) for col in eq.itercols()) @pytest.mark.parametrize("parallel", [True, False]) def test_simple_data(parallel, read_basic): """ Make sure the fast reader works with basic input data. """ table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C")) assert_table_equal(table, expected) def test_read_types(): """ Make sure that the read() function takes filenames, strings, and lists of strings in addition to file-like objects. """ t1 = ascii.read("a b c\n1 2 3\n4 5 6", format="fast_basic", guess=False) # TODO: also read from file t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format="fast_basic", guess=False) t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format="fast_basic", guess=False) assert_table_equal(t1, t2) assert_table_equal(t2, t3) @pytest.mark.parametrize("parallel", [True, False]) def test_supplied_names(parallel, read_basic): """ If passed as a parameter, names should replace any column names found in the header. 
""" table = read_basic("A B C\n1 2 3\n4 5 6", names=("X", "Y", "Z"), parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=("X", "Y", "Z")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_no_header(parallel, read_basic, read_no_header): """ The header should not be read when header_start=None. Unless names is passed, the column names should be auto-generated. """ # Cannot set header_start=None for basic format with pytest.raises(ValueError): read_basic( "A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel ) t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel) expected = Table( [["A", "1", "4"], ["B", "2", "5"], ["C", "3", "6"]], names=("col1", "col2", "col3"), ) assert_table_equal(t2, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_no_header_supplied_names(parallel, read_basic, read_no_header): """ If header_start=None and names is passed as a parameter, header data should not be read and names should be used instead. """ table = read_no_header( "A B C\n1 2 3\n4 5 6", names=("X", "Y", "Z"), parallel=parallel ) expected = Table( [["A", "1", "4"], ["B", "2", "5"], ["C", "3", "6"]], names=("X", "Y", "Z") ) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_comment(parallel, read_basic): """ Make sure that line comments are ignored by the C reader. """ table = read_basic( "# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel ) expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_empty_lines(parallel, read_basic): """ Make sure that empty lines are ignored by the C reader. """ table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_lstrip_whitespace(parallel, read_basic): """ Test to make sure the reader ignores whitespace at the beginning of fields. """ text = """ 1, 2, \t3 A,\t\t B, C a, b, c \n""" table = read_basic(text, delimiter=",", parallel=parallel) expected = Table([["A", "a"], ["B", "b"], ["C", "c"]], names=("1", "2", "3")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_rstrip_whitespace(parallel, read_basic): """ Test to make sure the reader ignores whitespace at the end of fields. """ text = " 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n" table = read_basic(text, delimiter=",", parallel=parallel) expected = Table([["A", "a"], ["B", "b"], ["C", "c"]], names=("1", "2", "3")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_conversion(parallel, read_basic): """ The reader should try to convert each column to ints. If this fails, the reader should try to convert to floats. Failing this, i.e. on parsing non-numeric input including isolated positive/negative signs, it should fall back to strings. """ text = """ A B C D E F G H 1 a 3 4 5 6 7 8 2. 
1 9 -.1e1 10.0 8.7 6 -5.3e4 4 2 -12 .4 +.e1 - + six """ table = read_basic(text, parallel=parallel) assert_equal(table["A"].dtype.kind, "f") assert table["B"].dtype.kind in ("S", "U") assert_equal(table["C"].dtype.kind, "i") assert_equal(table["D"].dtype.kind, "f") assert table["E"].dtype.kind in ("S", "U") assert table["F"].dtype.kind in ("S", "U") assert table["G"].dtype.kind in ("S", "U") assert table["H"].dtype.kind in ("S", "U") @pytest.mark.parametrize("parallel", [True, False]) def test_delimiter(parallel, read_basic): """ Make sure that different delimiters work as expected. """ text = dedent( """ COL1 COL2 COL3 1 A -1 2 B -2 """ ) expected = Table([[1, 2], ["A", "B"], [-1, -2]], names=("COL1", "COL2", "COL3")) for sep in " ,\t#;": table = read_basic(text.replace(" ", sep), delimiter=sep, parallel=parallel) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_include_names(parallel, read_basic): """ If include_names is not None, the parser should read only those columns in include_names. """ table = read_basic( "A B C D\n1 2 3 4\n5 6 7 8", include_names=["A", "D"], parallel=parallel ) expected = Table([[1, 5], [4, 8]], names=("A", "D")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_exclude_names(parallel, read_basic): """ If exclude_names is not None, the parser should exclude the columns in exclude_names. """ table = read_basic( "A B C D\n1 2 3 4\n5 6 7 8", exclude_names=["A", "D"], parallel=parallel ) expected = Table([[2, 6], [3, 7]], names=("B", "C")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_include_exclude_names(parallel, read_basic): """ Make sure that include_names is applied before exclude_names if both are specified. """ text = dedent( """ A B C D E F G H 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 """ ) table = read_basic( text, include_names=["A", "B", "D", "F", "H"], exclude_names=["B", "F"], parallel=parallel, ) expected = Table([[1, 9], [4, 12], [8, 16]], names=("A", "D", "H")) assert_table_equal(table, expected) def test_doubled_quotes(read_csv): """ Test #8283 (fix for #8281), parsing doubled-quotes "ab""cd" in a quoted field was incorrect. """ # fmt: off tbl = '\n'.join(['a,b', '"d""","d""q"', '"""q",""""']) expected = Table([['d"', '"q'], ['d"q', '"']], names=('a', 'b')) # fmt: on dat = read_csv(tbl) assert_table_equal(dat, expected) # In addition to the local read_csv wrapper, check that default # parsing with guessing gives the right answer. for fast_reader in True, False: dat = ascii.read(tbl, fast_reader=fast_reader) assert_table_equal(dat, expected) @pytest.mark.filterwarnings( "ignore:OverflowError converting to IntType in column TIMESTAMP" ) def test_doubled_quotes_segv(): """ Test the exact example from #8281 which resulted in SEGV prior to #8283 (in contrast to the tests above that just gave the wrong answer). Attempts to produce a more minimal example were unsuccessful, so the whole thing is included. 
""" tbl = dedent( """ "ID","TIMESTAMP","addendum_id","bib_reference","bib_reference_url","client_application","client_category","client_sort_key","color","coordsys","creator","creator_did","data_pixel_bitpix","dataproduct_subtype","dataproduct_type","em_max","em_min","format","hips_builder","hips_copyright","hips_creation_date","hips_creation_date_1","hips_creator","hips_data_range","hips_estsize","hips_frame","hips_glu_tag","hips_hierarchy","hips_initial_dec","hips_initial_fov","hips_initial_ra","hips_lon_asc","hips_master_url","hips_order","hips_order_1","hips_order_4","hips_order_min","hips_overlay","hips_pixel_bitpix","hips_pixel_cut","hips_pixel_scale","hips_progenitor_url","hips_publisher","hips_release_date","hips_release_date_1","hips_rgb_blue","hips_rgb_green","hips_rgb_red","hips_sampling","hips_service_url","hips_service_url_1","hips_service_url_2","hips_service_url_3","hips_service_url_4","hips_service_url_5","hips_service_url_6","hips_service_url_7","hips_service_url_8","hips_skyval","hips_skyval_method","hips_skyval_value","hips_status","hips_status_1","hips_status_2","hips_status_3","hips_status_4","hips_status_5","hips_status_6","hips_status_7","hips_status_8","hips_tile_format","hips_tile_format_1","hips_tile_format_4","hips_tile_width","hips_version","hipsgen_date","hipsgen_date_1","hipsgen_date_10","hipsgen_date_11","hipsgen_date_12","hipsgen_date_2","hipsgen_date_3","hipsgen_date_4","hipsgen_date_5","hipsgen_date_6","hipsgen_date_7","hipsgen_date_8","hipsgen_date_9","hipsgen_params","hipsgen_params_1","hipsgen_params_10","hipsgen_params_11","hipsgen_params_12","hipsgen_params_2","hipsgen_params_3","hipsgen_params_4","hipsgen_params_5","hipsgen_params_6","hipsgen_params_7","hipsgen_params_8","hipsgen_params_9","label","maxOrder","moc_access_url","moc_order","moc_release_date","moc_sky_fraction","obs_ack","obs_collection","obs_copyrigh_url","obs_copyright","obs_copyright_1","obs_copyright_url","obs_copyright_url_1","obs_description","obs_description_url","obs_descrition_url","obs_id","obs_initial_dec","obs_initial_fov","obs_initial_ra","obs_provenance","obs_regime","obs_title","ohips_frame","pixelCut","pixelRange","prov_did","prov_progenitor","prov_progenitor_url","publisher_did","publisher_id","s_pixel_scale","t_max","t_min" "CDS/P/2MASS/H","1524123841000","","2006AJ....131.1163S","http://cdsbib.u-strasbg.fr/cgi-bin/cdsbib?2006AJ....131.1163S","AladinDesktop","Image/Infrared/2MASS","04-001-03","","","","ivo://CDS/P/2MASS/H","","","image","1.798E-6","1.525E-6","","Aladin/HipsGen v9.017","CNRS/Unistra","2013-05-06T20:36Z","","CDS (A.Oberto)","","","equatorial","","mean","","","","","","9","","","","","","0 60","2.236E-4","","","2016-04-22T13:48Z","","","","","","http://alasky.u-strasbg.fr/2MASS/H","https://irsa.ipac.caltech.edu/data/hips/CDS/2MASS/H","http://alaskybis.u-strasbg.fr/2MASS/H","https://alaskybis.u-strasbg.fr/2MASS/H","","","","","","","","","public master clonableOnce","public mirror unclonable","public mirror clonableOnce","public mirror clonableOnce","","","","","","jpeg fits","","","512","1.31","","","","","","","","","","","","","","","","","","","","","","","","","","","","","http://alasky.u-strasbg.fr/2MASS/H/Moc.fits","9","","1","University of Massachusetts & IPAC/Caltech","The Two Micron All Sky Survey - H band (2MASS H)","","University of Massachusetts & IPAC/Caltech","","http://www.ipac.caltech.edu/2mass/","","2MASS has uniformly scanned the entire sky in three near-infrared bands to detect and characterize point sources brighter than about 1 mJy in 
each band, with signal-to-noise ratio (SNR) greater than 10, using a pixel size of 2.0"". This has achieved an 80,000-fold improvement in sensitivity relative to earlier surveys. 2MASS used two highly-automated 1.3-m telescopes, one at Mt. Hopkins, AZ, and one at CTIO, Chile. Each telescope was equipped with a three-channel camera, each channel consisting of a 256x256 array of HgCdTe detectors, capable of observing the sky simultaneously at J (1.25 microns), H (1.65 microns), and Ks (2.17 microns). The University of Massachusetts (UMass) was responsible for the overall management of the project, and for developing the infrared cameras and on-site computing systems at both facilities. The Infrared Processing and Analysis Center (IPAC) is responsible for all data processing through the Production Pipeline, and construction and distribution of the data products. Funding is provided primarily by NASA and the NSF","","","","+0","0.11451621372724685","0","","Infrared","2MASS H (1.66um)","","","","","IPAC/NASA","","","","","51941","50600" """ ) ascii.read(tbl, format="csv", fast_reader=True, guess=False) @pytest.mark.parametrize("parallel", [True, False]) def test_quoted_fields(parallel, read_basic): """ The character quotechar (default '"') should denote the start of a field which can contain the field delimiter and newlines. """ if parallel: pytest.xfail("Multiprocessing can fail with quoted fields") text = dedent( """ "A B" C D 1.5 2.1 -37.1 a b " c d" """ ) table = read_basic(text, parallel=parallel) expected = Table( [["1.5", "a"], ["2.1", "b"], ["-37.1", "c\nd"]], names=("A B", "C", "D") ) assert_table_equal(table, expected) table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel) assert_table_equal(table, expected) @pytest.mark.parametrize( "key,val", [ ("delimiter", ",,"), # multi-char delimiter ("comment", "##"), # multi-char comment ("data_start", None), # data_start=None ("data_start", -1), # data_start negative ("quotechar", "##"), # multi-char quote signifier ("header_start", -1), # negative header_start ( "converters", {i + 1: ascii.convert_numpy(np.uint) for i in range(3)}, ), # passing converters ("Inputter", ascii.ContinuationLinesInputter), # passing Inputter ("header_Splitter", ascii.DefaultSplitter), # passing Splitter ("data_Splitter", ascii.DefaultSplitter), ], ) def test_invalid_parameters(key, val): """ Make sure the C reader raises an error if passed parameters it can't handle. """ with pytest.raises(ParameterError): FastBasic(**{key: val}).read("1 2 3\n4 5 6") with pytest.raises(ParameterError): ascii.read("1 2 3\n4 5 6", format="fast_basic", guess=False, **{key: val}) def test_invalid_parameters_other(): with pytest.raises(TypeError): FastBasic(foo=7).read("1 2 3\n4 5 6") # unexpected argument with pytest.raises(FastOptionsError): # don't fall back on the slow reader ascii.read("1 2 3\n4 5 6", format="basic", fast_reader={"foo": 7}) with pytest.raises(ParameterError): # Outputter cannot be specified in constructor FastBasic(Outputter=ascii.TableOutputter).read("1 2 3\n4 5 6") def test_too_many_cols1(): """ If a row contains too many columns, the C reader should raise an error. 
""" text = dedent( """ A B C 1 2 3 4 5 6 7 8 9 10 11 12 13 """ ) with pytest.raises(InconsistentTableError) as e: FastBasic().read(text) assert ( "Number of header columns (3) inconsistent with data columns in data line 2" in str(e.value) ) def test_too_many_cols2(): text = """\ aaa,bbb 1,2, 3,4, """ with pytest.raises(InconsistentTableError) as e: FastCsv().read(text) assert ( "Number of header columns (2) inconsistent with data columns in data line 0" in str(e.value) ) def test_too_many_cols3(): text = """\ aaa,bbb 1,2,, 3,4, """ with pytest.raises(InconsistentTableError) as e: FastCsv().read(text) assert ( "Number of header columns (2) inconsistent with data columns in data line 0" in str(e.value) ) def test_too_many_cols4(): # https://github.com/astropy/astropy/issues/9922 with pytest.raises(InconsistentTableError) as e: ascii.read( get_pkg_data_filename("data/conf_py.txt"), fast_reader=True, guess=True ) assert "Unable to guess table format with the guesses listed below" in str(e.value) @pytest.mark.parametrize("parallel", [True, False]) def test_not_enough_cols(parallel, read_csv): """ If a row does not have enough columns, the FastCsv reader should add empty fields while the FastBasic reader should raise an error. """ text = """ A,B,C 1,2,3 4,5 6,7,8 """ table = read_csv(text, parallel=parallel) assert table["B"][1] is not ma.masked assert table["C"][1] is ma.masked with pytest.raises(InconsistentTableError): table = FastBasic(delimiter=",").read(text) @pytest.mark.parametrize("parallel", [True, False]) def test_data_end(parallel, read_basic, read_rdb): """ The parameter data_end should specify where data reading ends. """ text = """ A B C 1 2 3 4 5 6 7 8 9 10 11 12 """ table = read_basic(text, data_end=3, parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C")) assert_table_equal(table, expected) # data_end supports negative indexing table = read_basic(text, data_end=-2, parallel=parallel) assert_table_equal(table, expected) text = """ A\tB\tC N\tN\tS 1\t2\ta 3\t4\tb 5\t6\tc """ # make sure data_end works with RDB table = read_rdb(text, data_end=-1, parallel=parallel) expected = Table([[1, 3], [2, 4], ["a", "b"]], names=("A", "B", "C")) assert_table_equal(table, expected) # positive index table = read_rdb(text, data_end=3, parallel=parallel) expected = Table([[1], [2], ["a"]], names=("A", "B", "C")) assert_table_equal(table, expected) # empty table if data_end is too small table = read_rdb(text, data_end=1, parallel=parallel) expected = Table([[], [], []], names=("A", "B", "C")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_inf_nan(parallel, read_basic): """ Test that inf and nan-like values are correctly parsed on all platforms. Regression test for https://github.com/astropy/astropy/pull/3525 """ text = dedent( """\ A nan +nan -nan inf infinity +inf +infinity -inf -infinity """ ) expected = Table( { "A": [ np.nan, np.nan, np.nan, np.inf, np.inf, np.inf, np.inf, -np.inf, -np.inf, ] } ) table = read_basic(text, parallel=parallel) assert table["A"].dtype.kind == "f" assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_fill_values(parallel, read_basic): """ Make sure that the parameter fill_values works as intended. If fill_values is not specified, the default behavior should be to convert '' to 0. 
""" text = """ A, B, C , 2, nan a, -999, -3.4 nan, 5, -9999 8, nan, 7.6e12 """ table = read_basic(text, delimiter=",", parallel=parallel) # The empty value in row A should become a masked '0' assert isinstance(table["A"], MaskedColumn) assert table["A"][0] is ma.masked # '0' rather than 0 because there is a string in the column assert_equal(table["A"].data.data[0], "0") assert table["A"][1] is not ma.masked table = read_basic( text, delimiter=",", fill_values=("-999", "0"), parallel=parallel ) assert isinstance(table["B"], MaskedColumn) assert table["A"][0] is not ma.masked # empty value unaffected assert table["C"][2] is not ma.masked # -9999 is not an exact match assert table["B"][1] is ma.masked # Numeric because the rest of the column contains numeric data assert_equal(table["B"].data.data[1], 0.0) assert table["B"][0] is not ma.masked table = read_basic(text, delimiter=",", fill_values=[], parallel=parallel) # None of the columns should be masked for name in "ABC": assert not isinstance(table[name], MaskedColumn) table = read_basic( text, delimiter=",", fill_values=[("", "0", "A"), ("nan", "999", "A", "C")], parallel=parallel, ) assert np.isnan(table["B"][3]) # nan filling skips column B # should skip masking as well as replacing nan assert table["B"][3] is not ma.masked assert table["A"][0] is ma.masked assert table["A"][2] is ma.masked assert_equal(table["A"].data.data[0], "0") assert_equal(table["A"].data.data[2], "999") assert table["C"][0] is ma.masked assert_almost_equal(table["C"].data.data[0], 999.0) assert_almost_equal(table["C"][1], -3.4) # column is still of type float @pytest.mark.parametrize("parallel", [True, False]) def test_fill_include_exclude_names(parallel, read_csv): """ fill_include_names and fill_exclude_names should filter missing/empty value handling in the same way that include_names and exclude_names filter output columns. """ text = """ A, B, C , 1, 2 3, , 4 5, 5, """ table = read_csv(text, fill_include_names=["A", "B"], parallel=parallel) assert table["A"][0] is ma.masked assert table["B"][1] is ma.masked assert table["C"][2] is not ma.masked # C not in fill_include_names table = read_csv(text, fill_exclude_names=["A", "B"], parallel=parallel) assert table["C"][2] is ma.masked assert table["A"][0] is not ma.masked assert table["B"][1] is not ma.masked # A and B excluded from fill handling table = read_csv( text, fill_include_names=["A", "B"], fill_exclude_names=["B"], parallel=parallel ) assert table["A"][0] is ma.masked # fill_exclude_names applies after fill_include_names assert table["B"][1] is not ma.masked assert table["C"][2] is not ma.masked @pytest.mark.parametrize("parallel", [True, False]) def test_many_rows(parallel, read_basic): """ Make sure memory reallocation works okay when the number of rows is large (so that each column string is longer than INITIAL_COL_SIZE). """ text = "A B C\n" for i in range(500): # create 500 rows text += " ".join([str(i) for i in range(3)]) text += "\n" table = read_basic(text, parallel=parallel) expected = Table([[0] * 500, [1] * 500, [2] * 500], names=("A", "B", "C")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_many_columns(parallel, read_basic): """ Make sure memory reallocation works okay when the number of columns is large (so that each header string is longer than INITIAL_HEADER_SIZE). 
""" # create a string with 500 columns and two data rows text = " ".join([str(i) for i in range(500)]) text += "\n" + text + "\n" + text table = read_basic(text, parallel=parallel) expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)]) assert_table_equal(table, expected) def test_fast_reader(): """ Make sure that ascii.read() works as expected by default and with fast_reader specified. """ text = "a b c\n1 2 3\n4 5 6" with pytest.raises(ParameterError): # C reader can't handle regex comment ascii.read(text, format="fast_basic", guess=False, comment="##") # Enable multiprocessing and the fast converter try: ascii.read( text, format="basic", guess=False, fast_reader={"parallel": True, "use_fast_converter": True}, ) except NotImplementedError: # Might get this on Windows, try without parallel... if os.name == "nt": ascii.read( text, format="basic", guess=False, fast_reader={"parallel": False, "use_fast_converter": True}, ) else: raise # Should raise an error if fast_reader has an invalid key with pytest.raises(FastOptionsError): ascii.read(text, format="fast_basic", guess=False, fast_reader={"foo": True}) # Use the slow reader instead ascii.read(text, format="basic", guess=False, comment="##", fast_reader=False) # Will try the slow reader afterwards by default ascii.read(text, format="basic", guess=False, comment="##") @pytest.mark.parametrize("parallel", [True, False]) def test_read_tab(parallel, read_tab): """ The fast reader for tab-separated values should not strip whitespace, unlike the basic reader. """ if parallel: pytest.xfail("Multiprocessing can fail with quoted fields") text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t ' table = read_tab(text, parallel=parallel) assert_equal(table["1"][0], " a") # preserve line whitespace assert_equal(table["2"][0], " b ") # preserve field whitespace assert table["3"][0] is ma.masked # empty value should be masked assert_equal(table["2"][1], " d\n e") # preserve whitespace in quoted fields assert_equal(table["3"][1], " ") # preserve end-of-line whitespace @pytest.mark.parametrize("parallel", [True, False]) def test_default_data_start(parallel, read_basic): """ If data_start is not explicitly passed to read(), data processing should beginning right after the header. """ text = "ignore this line\na b c\n1 2 3\n4 5 6" table = read_basic(text, header_start=1, parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=("a", "b", "c")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_commented_header(parallel, read_commented_header): """ The FastCommentedHeader reader should mimic the behavior of the CommentedHeader by overriding the default header behavior of FastBasic. 
""" text = """ # A B C 1 2 3 4 5 6 """ t1 = read_commented_header(text, parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=("A", "B", "C")) assert_table_equal(t1, expected) text = "# first commented line\n # second commented line\n\n" + text t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel) assert_table_equal(t2, expected) # negative indexing allowed t3 = read_commented_header(text, header_start=-1, data_start=0, parallel=parallel) assert_table_equal(t3, expected) text += "7 8 9" t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel) expected = Table([[7], [8], [9]], names=("A", "B", "C")) assert_table_equal(t4, expected) with pytest.raises(ParameterError): # data_start cannot be negative read_commented_header(text, header_start=-1, data_start=-1, parallel=parallel) @pytest.mark.parametrize("parallel", [True, False]) def test_rdb(parallel, read_rdb): """ Make sure the FastRdb reader works as expected. """ text = """ A\tB\tC 1n\tS\t4N 1\t 9\t4.3 """ table = read_rdb(text, parallel=parallel) expected = Table([[1], [" 9"], [4.3]], names=("A", "B", "C")) assert_table_equal(table, expected) assert_equal(table["A"].dtype.kind, "i") assert table["B"].dtype.kind in ("S", "U") assert_equal(table["C"].dtype.kind, "f") with pytest.raises(ValueError) as e: text = "A\tB\tC\nN\tS\tN\n4\tb\ta" # C column contains non-numeric data read_rdb(text, parallel=parallel) assert "Column C failed to convert" in str(e.value) with pytest.raises(ValueError) as e: text = "A\tB\tC\nN\tN\n1\t2\t3" # not enough types specified read_rdb(text, parallel=parallel) assert "mismatch between number of column names and column types" in str(e.value) with pytest.raises(ValueError) as e: text = "A\tB\tC\nN\tN\t5\n1\t2\t3" # invalid type for column C read_rdb(text, parallel=parallel) assert "type definitions do not all match [num](N|S)" in str(e.value) @pytest.mark.parametrize("parallel", [True, False]) def test_data_start(parallel, read_basic): """ Make sure that data parsing begins at data_start (ignoring empty and commented lines but not taking quoted values into account). """ if parallel: pytest.xfail("Multiprocessing can fail with quoted fields") text = """ A B C 1 2 3 4 5 6 7 8 "9 1" # comment 10 11 12 """ table = read_basic(text, data_start=2, parallel=parallel) expected = Table( [[4, 7, 10], [5, 8, 11], ["6", "9\n1", "12"]], names=("A", "B", "C") ) assert_table_equal(table, expected) table = read_basic(text, data_start=3, parallel=parallel) # ignore empty line expected = Table([[7, 10], [8, 11], ["9\n1", "12"]], names=("A", "B", "C")) assert_table_equal(table, expected) with pytest.raises(InconsistentTableError) as e: # tries to begin in the middle of quoted field read_basic(text, data_start=4, parallel=parallel) assert "header columns (3) inconsistent with data columns in data line 0" in str( e.value ) table = read_basic(text, data_start=5, parallel=parallel) # ignore commented line expected = Table([[10], [11], [12]], names=("A", "B", "C")) assert_table_equal(table, expected) text = """ A B C 1 2 3 4 5 6 7 8 9 # comment 10 11 12 """ # make sure reading works as expected in parallel table = read_basic(text, data_start=2, parallel=parallel) expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=("A", "B", "C")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_quoted_empty_values(parallel, read_basic): """ Quoted empty values spanning multiple lines should be treated correctly. 
""" if parallel: pytest.xfail("Multiprocessing can fail with quoted fields") text = 'a b c\n1 2 " \n "' table = read_basic(text, parallel=parallel) assert table["c"][0] == "\n" # empty value masked by default @pytest.mark.parametrize("parallel", [True, False]) def test_csv_comment_default(parallel, read_csv): """ Unless the comment parameter is specified, the CSV reader should not treat any lines as comments. """ text = "a,b,c\n#1,2,3\n4,5,6" table = read_csv(text, parallel=parallel) expected = Table([["#1", "4"], [2, 5], [3, 6]], names=("a", "b", "c")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_whitespace_before_comment(parallel, read_tab): """ Readers that don't strip whitespace from data (Tab, RDB) should still treat lines with leading whitespace and then the comment char as comment lines. """ text = "a\tb\tc\n # comment line\n1\t2\t3" table = read_tab(text, parallel=parallel) expected = Table([[1], [2], [3]], names=("a", "b", "c")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_strip_line_trailing_whitespace(parallel, read_basic): """ Readers that strip whitespace from lines should ignore trailing whitespace after the last data value of each row. """ text = "a b c\n1 2 \n3 4 5" with pytest.raises(InconsistentTableError) as e: ascii.read(StringIO(text), format="fast_basic", guess=False) assert "header columns (3) inconsistent with data columns in data line 0" in str( e.value ) text = "a b c\n 1 2 3 \t \n 4 5 6 " table = read_basic(text, parallel=parallel) expected = Table([[1, 4], [2, 5], [3, 6]], names=("a", "b", "c")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_no_data(parallel, read_basic): """ As long as column names are supplied, the C reader should return an empty table in the absence of data. """ table = read_basic("a b c", parallel=parallel) expected = Table([[], [], []], names=("a", "b", "c")) assert_table_equal(table, expected) table = read_basic("a b c\n1 2 3", data_start=2, parallel=parallel) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_line_endings(parallel, read_basic, read_commented_header, read_rdb): """ Make sure the fast reader accepts CR and CR+LF as newlines. """ text = "a b c\n1 2 3\n4 5 6\n7 8 9\n" expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=("a", "b", "c")) for newline in ("\r\n", "\r"): table = read_basic(text.replace("\n", newline), parallel=parallel) assert_table_equal(table, expected) # Make sure the splitlines() method of FileString # works with CR/CR+LF line endings text = "#" + text for newline in ("\r\n", "\r"): table = read_commented_header(text.replace("\n", newline), parallel=parallel) assert_table_equal(table, expected) expected = Table( [MaskedColumn([1, 4, 7]), [2, 5, 8], MaskedColumn([3, 6, 9])], names=("a", "b", "c"), ) expected["a"][0] = np.ma.masked expected["c"][0] = np.ma.masked text = "a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n" for newline in ("\r\n", "\r"): table = read_rdb(text.replace("\n", newline), parallel=parallel) assert_table_equal(table, expected) assert np.all(table == expected) @pytest.mark.parametrize("parallel", [True, False]) def test_store_comments(parallel, read_basic): """ Make sure that the output Table produced by the fast reader stores any comment lines in its meta attribute. 
""" text = """ # header comment a b c # comment 2 # comment 3 1 2 3 4 5 6 """ table = read_basic(text, parallel=parallel, check_meta=True) assert_equal(table.meta["comments"], ["header comment", "comment 2", "comment 3"]) @pytest.mark.parametrize("parallel", [True, False]) def test_empty_quotes(parallel, read_basic): """ Make sure the C reader doesn't segfault when the input data contains empty quotes. [#3407] """ table = read_basic('a b\n1 ""\n2 ""', parallel=parallel) expected = Table([[1, 2], [0, 0]], names=("a", "b")) assert_table_equal(table, expected) @pytest.mark.parametrize("parallel", [True, False]) def test_fast_tab_with_names(parallel, read_tab): """ Make sure the C reader doesn't segfault when the header for the first column is missing [#3545] """ content = """# \tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot -3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t""" head = [f"A{i}" for i in range(28)] read_tab(content, data_start=1, parallel=parallel, names=head) @pytest.mark.hugemem def test_read_big_table(tmp_path): """Test reading of a huge file. This test generates a huge CSV file (~2.3Gb) before reading it (see https://github.com/astropy/astropy/pull/5319). The test is run only if the ``--run-hugemem`` cli option is given. Note that running the test requires quite a lot of memory (~18Gb when reading the file) !! """ NB_ROWS = 250000 NB_COLS = 500 filename = tmp_path / "big_table.csv" print(f"Creating a {NB_ROWS} rows table ({NB_COLS} columns).") data = np.random.random(NB_ROWS) t = Table(data=[data] * NB_COLS, names=[str(i) for i in range(NB_COLS)]) data = None print(f"Saving the table to {filename}") t.write(filename, format="ascii.csv", overwrite=True) t = None print( "Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header)." ) with open(filename) as f: assert sum(1 for line in f) == NB_ROWS + 1 print("Reading the file with astropy.") t = Table.read(filename, format="ascii.csv", fast_reader=True) assert len(t) == NB_ROWS @pytest.mark.hugemem def test_read_big_table2(tmp_path): """Test reading of a file with a huge column.""" # (2**32 // 2) : max value for int # // 10 : we use a value for rows that have 10 chars (1e9) # + 5 : add a few lines so the length cannot be stored by an int NB_ROWS = 2**32 // 2 // 10 + 5 filename = tmp_path / "big_table.csv" print(f"Creating a {NB_ROWS} rows table.") data = np.full(NB_ROWS, int(1e9), dtype=np.int32) t = Table(data=[data], names=["a"], copy=False) print(f"Saving the table to {filename}") t.write(filename, format="ascii.csv", overwrite=True) t = None print( "Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header)." 
) with open(filename) as f: assert sum(1 for line in f) == NB_ROWS + 1 print("Reading the file with astropy.") t = Table.read(filename, format="ascii.csv", fast_reader=True) assert len(t) == NB_ROWS # Test these both with guessing turned on and off @pytest.mark.parametrize("guess", [True, False]) # fast_reader configurations: False| 'use_fast_converter'=False|True @pytest.mark.parametrize( "fast_reader", [False, dict(use_fast_converter=False), dict(use_fast_converter=True)], ) @pytest.mark.parametrize("parallel", [False, True]) def test_data_out_of_range(parallel, fast_reader, guess): """ Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|) shall be returned as 0 and +-inf respectively by the C parser, just like the Python parser. Test fast converter only to nominal accuracy. """ # Python reader and strtod() are expected to return precise results rtol = 1.0e-30 # Update fast_reader dict; adapt relative precision for fast_converter if fast_reader: fast_reader["parallel"] = parallel if fast_reader.get("use_fast_converter"): rtol = 1.0e-15 elif np.iinfo(np.int_).dtype == np.dtype(np.int32): # On 32bit the standard C parser (strtod) returns strings for these pytest.xfail("C parser cannot handle float64 on 32bit systems") if parallel: if not fast_reader: pytest.skip("Multiprocessing only available in fast reader") elif CI: pytest.xfail("Multiprocessing can sometimes fail on CI") test_for_warnings = fast_reader and not parallel if not parallel and not fast_reader: ctx = nullcontext() else: ctx = pytest.warns() fields = ["10.1E+199", "3.14e+313", "2048e+306", "0.6E-325", "-2.e345"] values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf]) # NOTE: Warning behavior varies for the parameters being passed in. with ctx as w: t = ascii.read( StringIO(" ".join(fields)), format="no_header", guess=guess, fast_reader=fast_reader, ) if test_for_warnings: # Assert precision warnings for cols 2-5 assert len(w) == 4 for i in range(len(w)): assert f"OverflowError converting to FloatType in column col{i+2}" in str( w[i].message ) read_values = np.array([col[0] for col in t.itercols()]) assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324) # Test some additional corner cases fields = [ ".0101E202", "0.000000314E+314", "1777E+305", "-1799E+305", "0.2e-323", "5200e-327", " 0.0000000000000000000001024E+330", ] values = np.array( [1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308] ) with ctx as w: t = ascii.read( StringIO(" ".join(fields)), format="no_header", guess=guess, fast_reader=fast_reader, ) if test_for_warnings: # Assert precision warnings for cols 4-6 assert len(w) == 3 for i in range(len(w)): assert f"OverflowError converting to FloatType in column col{i+4}" in str( w[i].message ) read_values = np.array([col[0] for col in t.itercols()]) assert_almost_equal(read_values, values, rtol=rtol, atol=1.0e-324) # Test corner cases again with non-standard exponent_style (auto-detection) if fast_reader and fast_reader.get("use_fast_converter"): fast_reader.update({"exponent_style": "A"}) else: pytest.skip("Fortran exponent style only available in fast converter") fields = [ ".0101D202", "0.000000314d+314", "1777+305", "-1799E+305", "0.2e-323", "2500-327", " 0.0000000000000000000001024Q+330", ] with ctx as w: t = ascii.read( StringIO(" ".join(fields)), format="no_header", guess=guess, fast_reader=fast_reader, ) if test_for_warnings: assert len(w) == 3 read_values = np.array([col[0] for col in t.itercols()]) assert_almost_equal(read_values, values, rtol=rtol, 
atol=1.0e-324) @pytest.mark.parametrize("guess", [True, False]) # fast_reader configurations: False| 'use_fast_converter'=False|True @pytest.mark.parametrize( "fast_reader", [False, dict(use_fast_converter=False), dict(use_fast_converter=True)], ) @pytest.mark.parametrize("parallel", [False, True]) def test_data_at_range_limit(parallel, fast_reader, guess): """ Test parsing of fixed-format float64 numbers near range limits (|~4.94e-324 to 1.7977e+308|) - within limit for full precision (|~2.5e-307| for strtod C parser, factor 10 better for fast_converter) exact numbers shall be returned, beyond that an Overflow warning raised. Input of exactly 0.0 must not raise an OverflowError. """ # Python reader and strtod() are expected to return precise results rtol = 1.0e-30 # Update fast_reader dict; adapt relative precision for fast_converter if fast_reader: fast_reader["parallel"] = parallel if fast_reader.get("use_fast_converter"): rtol = 1.0e-15 elif np.iinfo(np.int_).dtype == np.dtype(np.int32): # On 32bit the standard C parser (strtod) returns strings for these pytest.xfail("C parser cannot handle float64 on 32bit systems") if parallel: if not fast_reader: pytest.skip("Multiprocessing only available in fast reader") elif CI: pytest.xfail("Multiprocessing can sometimes fail on CI") # Test very long fixed-format strings (to strtod range limit w/o Overflow) for D in 99, 202, 305: t = ascii.read( StringIO(99 * "0" + "." + D * "0" + "1"), format="no_header", guess=guess, fast_reader=fast_reader, ) assert_almost_equal(t["col1"][0], 10.0 ** -(D + 1), rtol=rtol, atol=1.0e-324) for D in 99, 202, 308: t = ascii.read( StringIO("1" + D * "0" + ".0"), format="no_header", guess=guess, fast_reader=fast_reader, ) assert_almost_equal(t["col1"][0], 10.0**D, rtol=rtol, atol=1.0e-324) # 0.0 is always exact (no Overflow warning)! for s in "0.0", "0.0e+0", 399 * "0" + "." + 365 * "0": t = ascii.read( StringIO(s), format="no_header", guess=guess, fast_reader=fast_reader ) assert t["col1"][0] == 0.0 # Test OverflowError at precision limit with laxer rtol if parallel: pytest.skip("Catching warnings broken in parallel mode") elif not fast_reader: pytest.skip("Python/numpy reader does not raise on Overflow") with pytest.warns() as warning_lines: t = ascii.read( StringIO("0." + 314 * "0" + "1"), format="no_header", guess=guess, fast_reader=fast_reader, ) n_warns = len(warning_lines) assert n_warns in (0, 1), f"Expected 0 or 1 warning, found {n_warns}" if n_warns == 1: assert ( "OverflowError converting to FloatType in column col1, possibly " "resulting in degraded precision" in str(warning_lines[0].message) ) assert_almost_equal(t["col1"][0], 1.0e-315, rtol=1.0e-10, atol=1.0e-324) @pytest.mark.parametrize("guess", [True, False]) @pytest.mark.parametrize("parallel", [False, True]) def test_int_out_of_range(parallel, guess): """ Integer numbers outside int range shall be returned as string columns consistent with the standard (Python) parser (no 'upcasting' to float). """ imin = np.iinfo(int).min + 1 imax = np.iinfo(int).max - 1 huge = f"{imax+2:d}" text = f"P M S\n {imax:d} {imin:d} {huge:s}" expected = Table([[imax], [imin], [huge]], names=("P", "M", "S")) # NOTE: Warning behavior varies for the parameters being passed in. 
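    # In serial mode exactly one overflow warning is expected below: column S
    # exceeds the native integer range, so it is kept as a string column and a
    # warning is emitted.  With parallel workers the warning may not propagate
    # back to the caller, so its count is only asserted when ``parallel`` is
    # False.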
with pytest.warns() as w: table = ascii.read( text, format="basic", guess=guess, fast_reader={"parallel": parallel} ) if not parallel: assert len(w) == 1 assert ( "OverflowError converting to IntType in column S, reverting to String" in str(w[0].message) ) assert_table_equal(table, expected) # Check with leading zeroes to make sure strtol does not read them as octal text = f"P M S\n000{imax:d} -0{-imin:d} 00{huge:s}" expected = Table([[imax], [imin], ["00" + huge]], names=("P", "M", "S")) with pytest.warns() as w: table = ascii.read( text, format="basic", guess=guess, fast_reader={"parallel": parallel} ) if not parallel: assert len(w) == 1 assert ( "OverflowError converting to IntType in column S, reverting to String" in str(w[0].message) ) assert_table_equal(table, expected) @pytest.mark.parametrize("guess", [True, False]) def test_int_out_of_order(guess): """ Mixed columns should be returned as float, but if the out-of-range integer shows up first, it will produce a string column - with both readers. Broken with the parallel fast_reader. """ imax = np.iinfo(int).max - 1 text = f"A B\n 12.3 {imax:d}0\n {imax:d}0 45.6e7" expected = Table([[12.3, 10.0 * imax], [f"{imax:d}0", "45.6e7"]], names=("A", "B")) with pytest.warns( AstropyWarning, match=r"OverflowError converting to " r"IntType in column B, reverting to String", ): table = ascii.read(text, format="basic", guess=guess, fast_reader=True) assert_table_equal(table, expected) with pytest.warns( AstropyWarning, match=r"OverflowError converting to " r"IntType in column B, reverting to String", ): table = ascii.read(text, format="basic", guess=guess, fast_reader=False) assert_table_equal(table, expected) @pytest.mark.parametrize("guess", [True, False]) @pytest.mark.parametrize("parallel", [False, True]) def test_fortran_reader(parallel, guess): """ Make sure that ascii.read() can read Fortran-style exponential notation using the fast_reader. 
""" # Check for nominal np.float64 precision rtol = 1.0e-15 atol = 0.0 text = ( "A B C D\n100.01{:s}99 2.0 2.0{:s}-103 3\n" + " 4.2{:s}-1 5.0{:s}-1 0.6{:s}4 .017{:s}+309" ) expc = Table( [[1.0001e101, 0.42], [2, 0.5], [2.0e-103, 6.0e3], [3, 1.7e307]], names=("A", "B", "C", "D"), ) expstyles = { "e": 6 * "E", "D": ("D", "d", "d", "D", "d", "D"), "Q": 3 * ("q", "Q"), "Fortran": ("E", "0", "D", "Q", "d", "0"), } # C strtod (not-fast converter) can't handle Fortran exp with pytest.raises(FastOptionsError) as e: ascii.read( text.format(*(6 * "D")), format="basic", guess=guess, fast_reader={ "use_fast_converter": False, "parallel": parallel, "exponent_style": "D", }, ) assert "fast_reader: exponent_style requires use_fast_converter" in str(e.value) # Enable multiprocessing and the fast converter iterate over # all style-exponent combinations, with auto-detection for s, c in expstyles.items(): table = ascii.read( text.format(*c), guess=guess, fast_reader={"parallel": parallel, "exponent_style": s}, ) assert_table_equal(table, expc, rtol=rtol, atol=atol) # Additional corner-case checks including triple-exponents without # any character and mixed whitespace separators text = ( "A B\t\t C D\n1.0001+101 2.0+000\t 0.0002-099 3\n " + "0.42-000 \t 0.5 6.+003 0.000000000000000000000017+330" ) table = ascii.read( text, guess=guess, fast_reader={"parallel": parallel, "exponent_style": "A"} ) assert_table_equal(table, expc, rtol=rtol, atol=atol) @pytest.mark.parametrize("guess", [True, False]) @pytest.mark.parametrize("parallel", [False, True]) def test_fortran_invalid_exp(parallel, guess): """ Test Fortran-style exponential notation in the fast_reader with invalid exponent-like patterns (no triple-digits) to make sure they are returned as strings instead, as with the standard C parser. 
""" if parallel and CI: pytest.xfail("Multiprocessing can sometimes fail on CI") formats = {"basic": " ", "tab": "\t", "csv": ","} header = ["S1", "F2", "S2", "F3", "S3", "F4", "F5", "S4", "I1", "F6", "F7"] # Tested entries and expected returns, first for auto-detect, # then for different specified exponents # fmt: off fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.', '2', '4.56e-2.3', '8000', '4.2-022', '.00000145e314'] vals_e = ['1.0001+1', '.42d1', '2.3+10', 0.5, '3+1001', 3.e3, 2, '4.56e-2.3', 8000, '4.2-022', 1.45e308] vals_d = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3, 2, '4.56e-2.3', 8000, '4.2-022', '.00000145e314'] vals_a = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3, 2, '4.56e-2.3', 8000, 4.2e-22, 1.45e308] vals_v = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3, 2, '4.56e-2.3', 8000, '4.2-022', 1.45e308] # fmt: on # Iterate over supported format types and separators for f, s in formats.items(): t1 = ascii.read( StringIO(s.join(header) + "\n" + s.join(fields)), format=f, guess=guess, fast_reader={"parallel": parallel, "exponent_style": "A"}, ) assert_table_equal(t1, Table([[col] for col in vals_a], names=header)) # Non-basic separators require guessing enabled to be detected if guess: formats["bar"] = "|" else: formats = {"basic": " "} for s in formats.values(): t2 = ascii.read( StringIO(s.join(header) + "\n" + s.join(fields)), guess=guess, fast_reader={"parallel": parallel, "exponent_style": "a"}, ) assert_table_equal(t2, Table([[col] for col in vals_a], names=header)) # Iterate for (default) expchar 'E' for s in formats.values(): t3 = ascii.read( StringIO(s.join(header) + "\n" + s.join(fields)), guess=guess, fast_reader={"parallel": parallel, "use_fast_converter": True}, ) assert_table_equal(t3, Table([[col] for col in vals_e], names=header)) # Iterate for expchar 'D' for s in formats.values(): t4 = ascii.read( StringIO(s.join(header) + "\n" + s.join(fields)), guess=guess, fast_reader={"parallel": parallel, "exponent_style": "D"}, ) assert_table_equal(t4, Table([[col] for col in vals_d], names=header)) # Iterate for regular converter (strtod) for s in formats.values(): t5 = ascii.read( StringIO(s.join(header) + "\n" + s.join(fields)), guess=guess, fast_reader={"parallel": parallel, "use_fast_converter": False}, ) read_values = [col[0] for col in t5.itercols()] if os.name == "nt": # Apparently C strtod() on (some?) MSVC recognizes 'd' exponents! assert read_values in (vals_v, vals_e) else: assert read_values == vals_e def test_fortran_reader_notbasic(): """ Check if readers without a fast option raise a value error when a fast_reader is asked for (implies the default 'guess=True'). 
""" tabstr = dedent( """ a b 1 1.23D4 2 5.67D-8 """ )[1:-1] t1 = ascii.read(tabstr.split("\n"), fast_reader=dict(exponent_style="D")) assert t1["b"].dtype.kind == "f" tabrdb = dedent( """ a\tb # A simple RDB table N\tN 1\t 1.23D4 2\t 5.67-008 """ )[1:-1] t2 = ascii.read( tabrdb.split("\n"), format="rdb", fast_reader=dict(exponent_style="fortran") ) assert t2["b"].dtype.kind == "f" tabrst = dedent( """ = ======= a b = ======= 1 1.23E4 2 5.67E-8 = ======= """ )[1:-1] t3 = ascii.read(tabrst.split("\n"), format="rst") assert t3["b"].dtype.kind == "f" t4 = ascii.read(tabrst.split("\n"), guess=True) assert t4["b"].dtype.kind == "f" # In the special case of fast_converter=True (the default), # incompatibility is ignored t5 = ascii.read(tabrst.split("\n"), format="rst", fast_reader=True) assert t5["b"].dtype.kind == "f" with pytest.raises(ParameterError): ascii.read(tabrst.split("\n"), format="rst", guess=False, fast_reader="force") with pytest.raises(ParameterError): ascii.read( tabrst.split("\n"), format="rst", guess=False, fast_reader=dict(use_fast_converter=False), ) tabrst = tabrst.replace("E", "D") with pytest.raises(ParameterError): ascii.read( tabrst.split("\n"), format="rst", guess=False, fast_reader=dict(exponent_style="D"), ) @pytest.mark.parametrize("guess", [True, False]) @pytest.mark.parametrize( "fast_reader", [dict(exponent_style="D"), dict(exponent_style="A")] ) def test_dict_kwarg_integrity(fast_reader, guess): """ Check if dictionaries passed as kwargs (fast_reader in this test) are left intact by ascii.read() """ expstyle = fast_reader.get("exponent_style", "E") fields = ["10.1D+199", "3.14d+313", "2048d+306", "0.6D-325", "-2.d345"] ascii.read(StringIO(" ".join(fields)), guess=guess, fast_reader=fast_reader) assert fast_reader.get("exponent_style", None) == expstyle @pytest.mark.parametrize( "fast_reader", [False, dict(parallel=True), dict(parallel=False)] ) def test_read_empty_basic_table_with_comments(fast_reader): """ Test for reading a "basic" format table that has no data but has comments. Tests the fix for #8267. """ dat = """ # comment 1 # comment 2 col1 col2 """ t = ascii.read(dat, fast_reader=fast_reader) assert t.meta["comments"] == ["comment 1", "comment 2"] assert len(t) == 0 assert t.colnames == ["col1", "col2"] @pytest.mark.parametrize( "fast_reader", [dict(use_fast_converter=True), dict(exponent_style="A")] ) def test_conversion_fast(fast_reader): """ The reader should try to convert each column to ints. If this fails, the reader should try to convert to floats. Failing this, i.e. on parsing non-numeric input including isolated positive/negative signs, it should fall back to strings. """ text = """ A B C D E F G H 1 a 3 4 5 6 7 8 2. 1 9 -.1e1 10.0 8.7 6 -5.3e4 4 2 -12 .4 +.e1 - + six """ table = ascii.read(text, fast_reader=fast_reader) assert_equal(table["A"].dtype.kind, "f") assert table["B"].dtype.kind in ("S", "U") assert_equal(table["C"].dtype.kind, "i") assert_equal(table["D"].dtype.kind, "f") assert table["E"].dtype.kind in ("S", "U") assert table["F"].dtype.kind in ("S", "U") assert table["G"].dtype.kind in ("S", "U") assert table["H"].dtype.kind in ("S", "U") @pytest.mark.parametrize("delimiter", ["\n", "\r"]) @pytest.mark.parametrize("fast_reader", [False, True, "force"]) def test_newline_as_delimiter(delimiter, fast_reader): """ Check that newline characters are correctly handled as delimiters. Tests the fix for #9928. 
""" if delimiter == "\r": eol = "\n" else: eol = "\r" inp0 = ["a | b | c ", " 1 | '2' | 3.00000 "] inp1 = "a {0:s} b {0:s}c{1:s} 1 {0:s}'2'{0:s} 3.0".format(delimiter, eol) inp2 = [f"a {delimiter} b{delimiter} c", f"1{delimiter} '2' {delimiter} 3.0"] t0 = ascii.read(inp0, delimiter="|", fast_reader=fast_reader) t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader) t2 = ascii.read(inp2, delimiter=delimiter, fast_reader=fast_reader) assert t1.colnames == t2.colnames == ["a", "b", "c"] assert len(t1) == len(t2) == 1 assert t1["b"].dtype.kind in ("S", "U") assert t2["b"].dtype.kind in ("S", "U") assert_table_equal(t1, t0) assert_table_equal(t2, t0) inp0 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format("|", eol) inp1 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format(delimiter, eol) t0 = ascii.read(inp0, delimiter="|", fast_reader=fast_reader) t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader) if not fast_reader: pytest.xfail("Quoted fields are not parsed correctly by BaseSplitter") assert_equal(t1["b"].dtype.kind, "i") @pytest.mark.parametrize("delimiter", [" ", "|", "\n", "\r"]) @pytest.mark.parametrize("fast_reader", [False, True, "force"]) def test_single_line_string(delimiter, fast_reader): """ String input without a newline character is interpreted as filename, unless element of an iterable. Maybe not logical, but test that it is at least treated consistently. """ expected = Table([[1], [2], [3.00]], names=("col1", "col2", "col3")) text = "1{0:s}2{0:s}3.0".format(delimiter) if delimiter in ("\r", "\n"): t1 = ascii.read( text, format="no_header", delimiter=delimiter, fast_reader=fast_reader ) assert_table_equal(t1, expected) else: # Windows raises OSError, but not the other OSes. with pytest.raises((FileNotFoundError, OSError)): t1 = ascii.read( text, format="no_header", delimiter=delimiter, fast_reader=fast_reader ) t2 = ascii.read( [text], format="no_header", delimiter=delimiter, fast_reader=fast_reader ) assert_table_equal(t2, expected)
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module tests some of the methods related to the ``HTML`` reader/writer and aims to document its functionality. Requires `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_ to be installed. """ from io import StringIO import numpy as np import pytest from astropy.io import ascii from astropy.io.ascii import core, html from astropy.table import Table from astropy.utils.compat.optional_deps import HAS_BLEACH, HAS_BS4 from .common import setup_function, teardown_function # noqa: F401 if HAS_BS4: from bs4 import BeautifulSoup, FeatureNotFound @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_soupstring(): """ Test to make sure the class SoupString behaves properly. """ soup = BeautifulSoup( "<html><head></head><body><p>foo</p></body></html>", "html.parser" ) soup_str = html.SoupString(soup) assert isinstance(soup_str, str) assert isinstance(soup_str, html.SoupString) assert soup_str == "<html><head></head><body><p>foo</p></body></html>" assert soup_str.soup is soup def test_listwriter(): """ Test to make sure the class ListWriter behaves properly. """ lst = [] writer = html.ListWriter(lst) for i in range(5): writer.write(i) for ch in "abcde": writer.write(ch) assert lst == [0, 1, 2, 3, 4, "a", "b", "c", "d", "e"] @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_identify_table(): """ Test to make sure that identify_table() returns whether the given BeautifulSoup tag is the correct table to process. """ # Should return False on non-<table> tags and None soup = BeautifulSoup("<html><body></body></html>", "html.parser") assert html.identify_table(soup, {}, 0) is False assert html.identify_table(None, {}, 0) is False soup = BeautifulSoup( '<table id="foo"><tr><th>A</th></tr><tr><td>B</td></tr></table>', "html.parser", ).table assert html.identify_table(soup, {}, 2) is False assert html.identify_table(soup, {}, 1) is True # Default index of 1 # Same tests, but with explicit parameter assert html.identify_table(soup, {"table_id": 2}, 1) is False assert html.identify_table(soup, {"table_id": 1}, 1) is True # Test identification by string ID assert html.identify_table(soup, {"table_id": "bar"}, 1) is False assert html.identify_table(soup, {"table_id": "foo"}, 1) is True @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_missing_data(): """ Test reading a table with missing data """ # First with default where blank => '0' table_in = [ "<table>", "<tr><th>A</th></tr>", "<tr><td></td></tr>", "<tr><td>1</td></tr>", "</table>", ] dat = Table.read(table_in, format="ascii.html") assert dat.masked is False assert np.all(dat["A"].mask == [True, False]) assert dat["A"].dtype.kind == "i" # Now with a specific value '...' 
=> missing table_in = [ "<table>", "<tr><th>A</th></tr>", "<tr><td>...</td></tr>", "<tr><td>1</td></tr>", "</table>", ] dat = Table.read(table_in, format="ascii.html", fill_values=[("...", "0")]) assert dat.masked is False assert np.all(dat["A"].mask == [True, False]) assert dat["A"].dtype.kind == "i" @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_rename_cols(): """ Test reading a table and renaming cols """ table_in = [ "<table>", "<tr><th>A</th> <th>B</th></tr>", "<tr><td>1</td><td>2</td></tr>", "</table>", ] # Swap column names dat = Table.read(table_in, format="ascii.html", names=["B", "A"]) assert dat.colnames == ["B", "A"] assert len(dat) == 1 # Swap column names and only include A (the renamed version) dat = Table.read( table_in, format="ascii.html", names=["B", "A"], include_names=["A"] ) assert dat.colnames == ["A"] assert len(dat) == 1 assert np.all(dat["A"] == 2) @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_no_names(): """ Test reading a table with no column header """ table_in = ["<table>", "<tr><td>1</td></tr>", "<tr><td>2</td></tr>", "</table>"] dat = Table.read(table_in, format="ascii.html") assert dat.colnames == ["col1"] assert len(dat) == 2 dat = Table.read(table_in, format="ascii.html", names=["a"]) assert dat.colnames == ["a"] assert len(dat) == 2 @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_identify_table_fail(): """ Raise an exception with an informative error message if table_id is not found. """ table_in = ['<table id="foo"><tr><th>A</th></tr>', "<tr><td>B</td></tr></table>"] with pytest.raises(core.InconsistentTableError) as err: Table.read( table_in, format="ascii.html", htmldict={"table_id": "bad_id"}, guess=False ) assert err.match("ERROR: HTML table id 'bad_id' not found$") with pytest.raises(core.InconsistentTableError) as err: Table.read(table_in, format="ascii.html", htmldict={"table_id": 3}, guess=False) assert err.match("ERROR: HTML table number 3 not found$") @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_backend_parsers(): """ Make sure the user can specify which back-end parser to use and that an error is raised if the parser is invalid. """ for parser in ("lxml", "xml", "html.parser", "html5lib"): try: Table.read( "data/html2.html", format="ascii.html", htmldict={"parser": parser}, guess=False, ) except FeatureNotFound: if parser == "html.parser": raise # otherwise ignore if the dependency isn't present # reading should fail if the parser is invalid with pytest.raises(FeatureNotFound): Table.read( "data/html2.html", format="ascii.html", htmldict={"parser": "foo"}, guess=False, ) @pytest.mark.skipif(HAS_BS4, reason="requires no BeautifulSoup4") def test_htmlinputter_no_bs4(): """ This should return an OptionalTableImportError if BeautifulSoup is not installed. """ inputter = html.HTMLInputter() with pytest.raises(core.OptionalTableImportError): inputter.process_lines([]) @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_htmlinputter(): """ Test to ensure that HTMLInputter correctly converts input into a list of SoupStrings representing table elements. 
""" f = "data/html.html" with open(f) as fd: table = fd.read() inputter = html.HTMLInputter() inputter.html = {} # In absence of table_id, defaults to the first table expected = [ "<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>", "<tr><td>1</td><td>a</td><td>1.05</td></tr>", "<tr><td>2</td><td>b</td><td>2.75</td></tr>", "<tr><td>3</td><td>c</td><td>-1.25</td></tr>", ] assert [str(x) for x in inputter.get_lines(table)] == expected # Should raise an InconsistentTableError if the table is not found inputter.html = {"table_id": 4} with pytest.raises(core.InconsistentTableError): inputter.get_lines(table) # Identification by string ID inputter.html["table_id"] = "second" expected = [ "<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>", "<tr><td>4</td><td>d</td><td>10.5</td></tr>", "<tr><td>5</td><td>e</td><td>27.5</td></tr>", "<tr><td>6</td><td>f</td><td>-12.5</td></tr>", ] assert [str(x) for x in inputter.get_lines(table)] == expected # Identification by integer index inputter.html["table_id"] = 3 expected = [ "<tr><th>C1</th><th>C2</th><th>C3</th></tr>", "<tr><td>7</td><td>g</td><td>105.0</td></tr>", "<tr><td>8</td><td>h</td><td>275.0</td></tr>", "<tr><td>9</td><td>i</td><td>-125.0</td></tr>", ] assert [str(x) for x in inputter.get_lines(table)] == expected @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_htmlsplitter(): """ Test to make sure that HTMLSplitter correctly inputs lines of type SoupString to return a generator that gives all header and data elements. """ splitter = html.HTMLSplitter() lines = [ html.SoupString( BeautifulSoup( "<table><tr><th>Col 1</th><th>Col 2</th></tr></table>", "html.parser" ).tr ), html.SoupString( BeautifulSoup( "<table><tr><td>Data 1</td><td>Data 2</td></tr></table>", "html.parser" ).tr ), ] expected_data = [["Col 1", "Col 2"], ["Data 1", "Data 2"]] assert list(splitter(lines)) == expected_data # Make sure the presence of a non-SoupString triggers a TypeError lines.append("<tr><td>Data 3</td><td>Data 4</td></tr>") with pytest.raises(TypeError): list(splitter(lines)) # Make sure that passing an empty list triggers an error with pytest.raises(core.InconsistentTableError): list(splitter([])) @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_htmlheader_start(): """ Test to ensure that the start_line method of HTMLHeader returns the first line of header data. Uses t/html.html for sample input. 
""" f = "data/html.html" with open(f) as fd: table = fd.read() inputter = html.HTMLInputter() inputter.html = {} header = html.HTMLHeader() lines = inputter.get_lines(table) assert ( str(lines[header.start_line(lines)]) == "<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>" ) inputter.html["table_id"] = "second" lines = inputter.get_lines(table) assert ( str(lines[header.start_line(lines)]) == "<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>" ) inputter.html["table_id"] = 3 lines = inputter.get_lines(table) assert ( str(lines[header.start_line(lines)]) == "<tr><th>C1</th><th>C2</th><th>C3</th></tr>" ) # start_line should return None if no valid header is found lines = [ html.SoupString( BeautifulSoup("<table><tr><td>Data</td></tr></table>", "html.parser").tr ), html.SoupString(BeautifulSoup("<p>Text</p>", "html.parser").p), ] assert header.start_line(lines) is None # Should raise an error if a non-SoupString is present lines.append("<tr><th>Header</th></tr>") with pytest.raises(TypeError): header.start_line(lines) @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_htmldata(): """ Test to ensure that the start_line and end_lines methods of HTMLData returns the first line of table data. Uses t/html.html for sample input. """ f = "data/html.html" with open(f) as fd: table = fd.read() inputter = html.HTMLInputter() inputter.html = {} data = html.HTMLData() lines = inputter.get_lines(table) assert ( str(lines[data.start_line(lines)]) == "<tr><td>1</td><td>a</td><td>1.05</td></tr>" ) # end_line returns the index of the last data element + 1 assert ( str(lines[data.end_line(lines) - 1]) == "<tr><td>3</td><td>c</td><td>-1.25</td></tr>" ) inputter.html["table_id"] = "second" lines = inputter.get_lines(table) assert ( str(lines[data.start_line(lines)]) == "<tr><td>4</td><td>d</td><td>10.5</td></tr>" ) assert ( str(lines[data.end_line(lines) - 1]) == "<tr><td>6</td><td>f</td><td>-12.5</td></tr>" ) inputter.html["table_id"] = 3 lines = inputter.get_lines(table) assert ( str(lines[data.start_line(lines)]) == "<tr><td>7</td><td>g</td><td>105.0</td></tr>" ) assert ( str(lines[data.end_line(lines) - 1]) == "<tr><td>9</td><td>i</td><td>-125.0</td></tr>" ) # start_line should raise an error if no table data exists lines = [ html.SoupString(BeautifulSoup("<div></div>", "html.parser").div), html.SoupString(BeautifulSoup("<p>Text</p>", "html.parser").p), ] with pytest.raises(core.InconsistentTableError): data.start_line(lines) # end_line should return None if no table data exists assert data.end_line(lines) is None # Should raise an error if a non-SoupString is present lines.append("<tr><td>Data</td></tr>") with pytest.raises(TypeError): data.start_line(lines) with pytest.raises(TypeError): data.end_line(lines) def test_multicolumn_write(): """ Test to make sure that the HTML writer writes multidimensional columns (those with iterable elements) using the colspan attribute of <th>. 
""" col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [("a", "a", "a"), ("b", "b", "b"), ("c", "c", "c")] table = Table([col1, col2, col3], names=("C1", "C2", "C3")) expected = """\ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>C1</th> <th colspan="2">C2</th> <th colspan="3">C3</th> </tr> </thead> <tr> <td>1</td> <td>1.0</td> <td>1.0</td> <td>a</td> <td>a</td> <td>a</td> </tr> <tr> <td>2</td> <td>2.0</td> <td>2.0</td> <td>b</td> <td>b</td> <td>b</td> </tr> <tr> <td>3</td> <td>3.0</td> <td>3.0</td> <td>c</td> <td>c</td> <td>c</td> </tr> </table> </body> </html> """ out = html.HTML().write(table)[0].strip() assert out == expected.strip() @pytest.mark.skipif(not HAS_BLEACH, reason="requires bleach") def test_multicolumn_write_escape(): """ Test to make sure that the HTML writer writes multidimensional columns (those with iterable elements) using the colspan attribute of <th>. """ col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [("<a></a>", "<a></a>", "a"), ("<b></b>", "b", "b"), ("c", "c", "c")] table = Table([col1, col2, col3], names=("C1", "C2", "C3")) expected = """\ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>C1</th> <th colspan="2">C2</th> <th colspan="3">C3</th> </tr> </thead> <tr> <td>1</td> <td>1.0</td> <td>1.0</td> <td><a></a></td> <td><a></a></td> <td>a</td> </tr> <tr> <td>2</td> <td>2.0</td> <td>2.0</td> <td><b></b></td> <td>b</td> <td>b</td> </tr> <tr> <td>3</td> <td>3.0</td> <td>3.0</td> <td>c</td> <td>c</td> <td>c</td> </tr> </table> </body> </html> """ out = html.HTML(htmldict={"raw_html_cols": "C3"}).write(table)[0].strip() assert out == expected.strip() def test_write_no_multicols(): """ Test to make sure that the HTML writer will not use multi-dimensional columns if the multicol parameter is False. """ col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [("a", "a", "a"), ("b", "b", "b"), ("c", "c", "c")] table = Table([col1, col2, col3], names=("C1", "C2", "C3")) expected = """\ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>C1</th> <th>C2</th> <th>C3</th> </tr> </thead> <tr> <td>1</td> <td>1.0 .. 1.0</td> <td>a .. a</td> </tr> <tr> <td>2</td> <td>2.0 .. 2.0</td> <td>b .. b</td> </tr> <tr> <td>3</td> <td>3.0 .. 3.0</td> <td>c .. c</td> </tr> </table> </body> </html> """ assert html.HTML({"multicol": False}).write(table)[0].strip() == expected.strip() @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_multicolumn_read(): """ Test to make sure that the HTML reader inputs multidimensional columns (those with iterable elements) using the colspan attribute of <th>. Ensure that any string element within a multidimensional column casts all elements to string prior to type conversion operations. """ table = Table.read("data/html2.html", format="ascii.html") str_type = np.dtype((str, 21)) expected = Table( np.array( [(["1", "2.5000000000000000001"], 3), (["1a", "1"], 3.5)], dtype=[("A", str_type, (2,)), ("B", "<f8")], ) ) assert np.all(table == expected) @pytest.mark.skipif(not HAS_BLEACH, reason="requires bleach") def test_raw_html_write(): """ Test that columns can contain raw HTML which is not escaped. 
""" t = Table([["<em>x</em>"], ["<em>y</em>"]], names=["a", "b"]) # One column contains raw HTML (string input) out = StringIO() t.write(out, format="ascii.html", htmldict={"raw_html_cols": "a"}) expected = """\ <tr> <td><em>x</em></td> <td>&lt;em&gt;y&lt;/em&gt;</td> </tr>""" assert expected in out.getvalue() # One column contains raw HTML (list input) out = StringIO() t.write(out, format="ascii.html", htmldict={"raw_html_cols": ["a"]}) assert expected in out.getvalue() # Two columns contains raw HTML (list input) out = StringIO() t.write(out, format="ascii.html", htmldict={"raw_html_cols": ["a", "b"]}) expected = """\ <tr> <td><em>x</em></td> <td><em>y</em></td> </tr>""" assert expected in out.getvalue() @pytest.mark.skipif(not HAS_BLEACH, reason="requires bleach") def test_raw_html_write_clean(): """ Test that columns can contain raw HTML which is not escaped. """ import bleach t = Table( [["<script>x</script>"], ["<p>y</p>"], ["<em>y</em>"]], names=["a", "b", "c"] ) # Confirm that <script> and <p> get escaped but not <em> out = StringIO() t.write(out, format="ascii.html", htmldict={"raw_html_cols": t.colnames}) expected = """\ <tr> <td>&lt;script&gt;x&lt;/script&gt;</td> <td>&lt;p&gt;y&lt;/p&gt;</td> <td><em>y</em></td> </tr>""" assert expected in out.getvalue() # Confirm that we can whitelist <p> out = StringIO() t.write( out, format="ascii.html", htmldict={ "raw_html_cols": t.colnames, "raw_html_clean_kwargs": {"tags": list(bleach.ALLOWED_TAGS) + ["p"]}, }, ) expected = """\ <tr> <td>&lt;script&gt;x&lt;/script&gt;</td> <td><p>y</p></td> <td><em>y</em></td> </tr>""" assert expected in out.getvalue() def test_write_table_html_fill_values(): """ Test that passing fill_values should replace any matching row """ buffer_output = StringIO() t = Table([[1], [2]], names=("a", "b")) ascii.write(t, buffer_output, fill_values=("1", "Hello world"), format="html") t_expected = Table([["Hello world"], [2]], names=("a", "b")) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format="html") assert buffer_output.getvalue() == buffer_expected.getvalue() def test_write_table_html_fill_values_optional_columns(): """ Test that passing optional column in fill_values should only replace matching columns """ buffer_output = StringIO() t = Table([[1], [1]], names=("a", "b")) ascii.write(t, buffer_output, fill_values=("1", "Hello world", "b"), format="html") t_expected = Table([[1], ["Hello world"]], names=("a", "b")) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format="html") assert buffer_output.getvalue() == buffer_expected.getvalue() def test_write_table_html_fill_values_masked(): """ Test that passing masked values in fill_values should only replace masked columns or values """ buffer_output = StringIO() t = Table([[1], [1]], names=("a", "b"), masked=True, dtype=("i4", "i8")) t["a"] = np.ma.masked ascii.write(t, buffer_output, fill_values=(ascii.masked, "TEST"), format="html") t_expected = Table([["TEST"], [1]], names=("a", "b")) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format="html") assert buffer_output.getvalue() == buffer_expected.getvalue() def test_multicolumn_table_html_fill_values(): """ Test to make sure that the HTML writer writes multidimensional columns with correctly replaced fill_values. 
""" col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [("a", "a", "a"), ("b", "b", "b"), ("c", "c", "c")] buffer_output = StringIO() t = Table([col1, col2, col3], names=("C1", "C2", "C3")) ascii.write(t, buffer_output, fill_values=("a", "z"), format="html") col1 = [1, 2, 3] col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)] col3 = [("z", "z", "z"), ("b", "b", "b"), ("c", "c", "c")] buffer_expected = StringIO() t_expected = Table([col1, col2, col3], names=("C1", "C2", "C3")) ascii.write(t_expected, buffer_expected, format="html") assert buffer_output.getvalue() == buffer_expected.getvalue() def test_multi_column_write_table_html_fill_values_masked(): """ Test that passing masked values in fill_values should only replace masked columns or values for multidimensional tables """ buffer_output = StringIO() t = Table([[1, 2, 3, 4], ["--", "a", "--", "b"]], names=("a", "b"), masked=True) t["a"][0:2] = np.ma.masked t["b"][0:2] = np.ma.masked ascii.write(t, buffer_output, fill_values=[(ascii.masked, "MASKED")], format="html") t_expected = Table( [["MASKED", "MASKED", 3, 4], ["MASKED", "MASKED", "--", "b"]], names=("a", "b") ) buffer_expected = StringIO() ascii.write(t_expected, buffer_expected, format="html") print(buffer_expected.getvalue()) assert buffer_output.getvalue() == buffer_expected.getvalue() def test_write_table_formatted_columns(): """ Test to make sure that the HTML writer writes out using the supplied formatting. """ col1 = [1, 2] col2 = [1.234567e-11, -9.876543e11] formats = {"C1": "04d", "C2": ".2e"} table = Table([col1, col2], names=formats.keys()) expected = """\ <html> <head> <meta charset="utf-8"/> <meta content="text/html;charset=UTF-8" http-equiv="Content-type"/> </head> <body> <table> <thead> <tr> <th>C1</th> <th>C2</th> </tr> </thead> <tr> <td>0001</td> <td>1.23e-11</td> </tr> <tr> <td>0002</td> <td>-9.88e+11</td> </tr> </table> </body> </html> """ with StringIO() as sp: table.write(sp, format="html", formats=formats) out = sp.getvalue().strip() assert out == expected.strip() @pytest.mark.skipif(not HAS_BS4, reason="requires BeautifulSoup4") def test_read_html_unicode(): """ Test reading an HTML table with unicode values """ table_in = [ "<table>", "<tr><td>&#x0394;</td></tr>", "<tr><td>Δ</td></tr>", "</table>", ] dat = Table.read(table_in, format="ascii.html") assert np.all(dat["col1"] == ["Δ", "Δ"])
3b1f56dff0ed7a019b342c704a553ee090dcb712f56eedc499d26d7fde4a10ca
# Licensed under a 3-clause BSD style license - see LICENSE.rst import locale import pathlib import platform import re from collections import OrderedDict from io import BytesIO, StringIO import numpy as np import pytest from astropy import table from astropy.io import ascii from astropy.io.ascii import core from astropy.io.ascii.core import convert_numpy from astropy.io.ascii.ui import _probably_html, get_read_trace from astropy.table import MaskedColumn, Table from astropy.table.table_helpers import simple_table from astropy.units import Unit # NOTE: Python can be built without bz2. from astropy.utils.compat.optional_deps import HAS_BZ2 from astropy.utils.data import get_pkg_data_path from astropy.utils.exceptions import AstropyWarning # setup/teardown function to have the tests run in the correct directory from .common import setup_function # noqa: F401 from .common import teardown_function # noqa: F401 from .common import assert_almost_equal, assert_equal, assert_true def asciiIO(x): return BytesIO(x.encode("ascii")) @pytest.fixture def home_is_data(monkeypatch, request): """ Pytest fixture to run a test case with tilde-prefixed paths. In the tilde-path case, environment variables are temporarily modified so that '~' resolves to the data directory. """ path = get_pkg_data_path("data") # For Unix monkeypatch.setenv("HOME", path) # For Windows monkeypatch.setenv("USERPROFILE", path) @pytest.mark.parametrize( "fast_reader", [True, False, {"use_fast_converter": False}, {"use_fast_converter": True}, "force"], ) def test_convert_overflow(fast_reader): """ Test reading an extremely large integer, which falls through to string due to an overflow error (#2234). The C parsers used to return inf (kind 'f') for this. """ expected_kind = "U" with pytest.warns( AstropyWarning, match="OverflowError converting to IntType in column a" ): dat = ascii.read( ["a", "1" * 10000], format="basic", fast_reader=fast_reader, guess=False ) assert dat["a"].dtype.kind == expected_kind def test_read_specify_converters_with_names(): """ Exact example from #9701: When using ascii.read with both the names and converters arguments, the converters dictionary ignores the user-supplied names and requires that you know the guessed names. """ csv_text = ["a,b,c", "1,2,3", "4,5,6"] names = ["A", "B", "C"] converters = { "A": [ascii.convert_numpy(float)], "B": [ascii.convert_numpy(int)], "C": [ascii.convert_numpy(str)], } t = ascii.read(csv_text, format="csv", names=names, converters=converters) assert t["A"].dtype.kind == "f" assert t["B"].dtype.kind == "i" assert t["C"].dtype.kind == "U" def test_read_remove_and_rename_columns(): csv_text = ["a,b,c", "1,2,3", "4,5,6"] reader = ascii.get_reader(Reader=ascii.Csv) reader.read(csv_text) header = reader.header with pytest.raises(KeyError, match="Column NOT-EXIST does not exist"): header.remove_columns(["NOT-EXIST"]) header.remove_columns(["c"]) assert header.colnames == ("a", "b") header.rename_column("a", "aa") assert header.colnames == ("aa", "b") with pytest.raises(KeyError, match="Column NOT-EXIST does not exist"): header.rename_column("NOT-EXIST", "aa") def test_guess_with_names_arg(): """ Make sure reading a table with guess=True gives the expected result when the names arg is specified. """ # This is a NoHeader format table and so `names` should replace # the default col0, col1 names. It fails as a Basic format # table when guessing because the column names would be '1', '2'. 
dat = ascii.read(["1,2", "3,4"], names=("a", "b")) assert len(dat) == 2 assert dat.colnames == ["a", "b"] # This is a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(["c,d", "3,4"], names=("a", "b")) assert len(dat) == 1 assert dat.colnames == ["a", "b"] # This is also a Basic format table and the first row # gives the column names 'c', 'd', which get replaced by 'a', 'b' dat = ascii.read(["c d", "e f"], names=("a", "b")) assert len(dat) == 1 assert dat.colnames == ["a", "b"] def test_guess_with_format_arg(): """ When the format or Reader is explicitly given then disable the strict column name checking in guessing. """ dat = ascii.read(["1,2", "3,4"], format="basic") assert len(dat) == 1 assert dat.colnames == ["1", "2"] dat = ascii.read(["1,2", "3,4"], names=("a", "b"), format="basic") assert len(dat) == 1 assert dat.colnames == ["a", "b"] dat = ascii.read(["1,2", "3,4"], Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ["1", "2"] dat = ascii.read(["1,2", "3,4"], names=("a", "b"), Reader=ascii.Basic) assert len(dat) == 1 assert dat.colnames == ["a", "b"] # For good measure check the same in the unified I/O interface dat = Table.read(["1,2", "3,4"], format="ascii.basic") assert len(dat) == 1 assert dat.colnames == ["1", "2"] dat = Table.read(["1,2", "3,4"], format="ascii.basic", names=("a", "b")) assert len(dat) == 1 assert dat.colnames == ["a", "b"] def test_guess_with_delimiter_arg(): """ When the delimiter is explicitly given then do not try others in guessing. """ fields = ["10.1E+19", "3.14", "2048", "-23"] values = [1.01e20, 3.14, 2048, -23] # Default guess should recognise CSV with optional spaces t0 = ascii.read(asciiIO(", ".join(fields)), guess=True) for n, v in zip(t0.colnames, values): assert t0[n][0] == v # Forcing space as delimiter produces type str columns ('10.1E+19,') t1 = ascii.read(asciiIO(", ".join(fields)), guess=True, delimiter=" ") for n, v in zip(t1.colnames[:-1], fields[:-1]): assert t1[n][0] == v + "," def test_reading_mixed_delimiter_tabs_spaces(): # Regression test for https://github.com/astropy/astropy/issues/6770 dat = ascii.read("1 2\t3\n1 2\t3", format="no_header", names=list("abc")) assert len(dat) == 2 Table.read(["1 2\t3", "1 2\t3"], format="ascii.no_header", names=["a", "b", "c"]) assert len(dat) == 2 @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_read_with_names_arg(fast_reader): """ Test that a bad value of `names` raises an exception. 
""" # CParser only uses columns in `names` and thus reports mismatch in num_col with pytest.raises(ascii.InconsistentTableError): ascii.read(["c d", "e f"], names=("a",), guess=False, fast_reader=fast_reader) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) @pytest.mark.parametrize("path_format", ["plain", "tilde-str", "tilde-pathlib"]) def test_read_all_files(fast_reader, path_format, home_is_data): for testfile in get_testfiles(): if testfile.get("skip"): print(f"\n\n******** SKIPPING {testfile['name']}") continue if "tilde" in path_format: if "str" in path_format: testfile["name"] = "~/" + testfile["name"][5:] else: testfile["name"] = pathlib.Path("~/", testfile["name"][5:]) print(f"\n\n******** READING {testfile['name']}") for guess in (True, False): test_opts = testfile["opts"].copy() if "guess" not in test_opts: test_opts["guess"] = guess if ( "Reader" in test_opts and f"fast_{test_opts['Reader']._format_name}" in core.FAST_CLASSES ): # has fast version if "Inputter" not in test_opts: # fast reader doesn't allow this test_opts["fast_reader"] = fast_reader table = ascii.read(testfile["name"], **test_opts) assert_equal(table.dtype.names, testfile["cols"]) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile["nrows"]) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) @pytest.mark.parametrize("path_format", ["plain", "tilde-str", "tilde-pathlib"]) def test_read_all_files_via_table(fast_reader, path_format, home_is_data): for testfile in get_testfiles(): if testfile.get("skip"): print(f"\n\n******** SKIPPING {testfile['name']}") continue if "tilde" in path_format: if "str" in path_format: testfile["name"] = "~/" + testfile["name"][5:] else: testfile["name"] = pathlib.Path("~/", testfile["name"][5:]) print(f"\n\n******** READING {testfile['name']}") for guess in (True, False): test_opts = testfile["opts"].copy() if "guess" not in test_opts: test_opts["guess"] = guess if "Reader" in test_opts: format = f"ascii.{test_opts['Reader']._format_name}" del test_opts["Reader"] else: format = "ascii" if f"fast_{format}" in core.FAST_CLASSES: test_opts["fast_reader"] = fast_reader table = Table.read(testfile["name"], format=format, **test_opts) assert_equal(table.dtype.names, testfile["cols"]) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile["nrows"]) def test_guess_all_files(): for testfile in get_testfiles(): if testfile.get("skip"): print(f"\n\n******** SKIPPING {testfile['name']}") continue if not testfile["opts"].get("guess", True): continue print(f"\n\n******** READING {testfile['name']}") for filter_read_opts in (["Reader", "delimiter", "quotechar"], []): # Copy read options except for those in filter_read_opts guess_opts = { k: v for k, v in testfile["opts"].items() if k not in filter_read_opts } table = ascii.read(testfile["name"], guess=True, **guess_opts) assert_equal(table.dtype.names, testfile["cols"]) for colname in table.dtype.names: assert_equal(len(table[colname]), testfile["nrows"]) def test_validate_read_kwargs(): lines = ["a b", "1 2", "3 4"] # Check that numpy integers are allowed out = ascii.read(lines, data_start=np.int16(2)) assert np.all(out["a"] == [3]) with pytest.raises( TypeError, match=r"read\(\) argument 'data_end' must be a " r"<class 'int'> object, " r"got <class 'str'> instead", ): ascii.read(lines, data_end="needs integer") with pytest.raises( TypeError, match=r"read\(\) argument 'fill_include_names' must " r"be a list-like object, got <class 'str'> instead", ): ascii.read(lines, 
fill_include_names="ID") def test_daophot_indef(): """Test that INDEF is correctly interpreted as a missing value""" table = ascii.read("data/daophot2.dat", Reader=ascii.Daophot) for col in table.itercols(): # Four columns have all INDEF values and are masked, rest are normal Column if col.name in ("OTIME", "MAG", "MERR", "XAIRMASS"): assert np.all(col.mask) else: assert not hasattr(col, "mask") def test_daophot_types(): """ Test specific data types which are different from what would be inferred automatically based only data values. DAOphot reader uses the header information to assign types. """ table = ascii.read("data/daophot2.dat", Reader=ascii.Daophot) assert table["LID"].dtype.char in "fd" # float or double assert table["MAG"].dtype.char in "fd" # even without any data values assert ( table["PIER"].dtype.char in "US" ) # string (data values are consistent with int) assert table["ID"].dtype.char in "il" # int or long def test_daophot_header_keywords(): table = ascii.read("data/daophot.dat", Reader=ascii.Daophot) expected_keywords = ( ("NSTARFILE", "test.nst.1", "filename", "%-23s"), ("REJFILE", '"hello world"', "filename", "%-23s"), ("SCALE", "1.", "units/pix", "%-23.7g"), ) keywords = table.meta["keywords"] # Ordered dict of keyword structures for name, value, units, format_ in expected_keywords: keyword = keywords[name] assert_equal(keyword["value"], value) assert_equal(keyword["units"], units) assert_equal(keyword["format"], format_) def test_daophot_multiple_aperture(): table = ascii.read("data/daophot3.dat", Reader=ascii.Daophot) assert "MAG5" in table.colnames # MAG5 is one of the newly created column names assert table["MAG5"][4] == 22.13 # A sample entry in daophot3.dat file assert table["MERR2"][0] == 1.171 assert np.all( table["RAPERT5"] == 23.3 ) # assert all the 5th apertures are same 23.3 def test_daophot_multiple_aperture2(): table = ascii.read("data/daophot4.dat", Reader=ascii.Daophot) assert "MAG15" in table.colnames # MAG15 is one of the newly created column name assert table["MAG15"][1] == -7.573 # A sample entry in daophot4.dat file assert table["MERR2"][0] == 0.049 assert np.all(table["RAPERT5"] == 5.0) # assert all the 5th apertures are same 5.0 @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_empty_table_no_header(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read( "data/no_data_without_header.dat", Reader=ascii.NoHeader, guess=False, fast_reader=fast_reader, ) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_wrong_quote(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read("data/simple.txt", guess=False, fast_reader=fast_reader) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_extra_data_col(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read("data/bad.txt", fast_reader=fast_reader) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_extra_data_col2(fast_reader): with pytest.raises(ascii.InconsistentTableError): ascii.read("data/simple5.txt", delimiter="|", fast_reader=fast_reader) def test_missing_file(): with pytest.raises(OSError): ascii.read("does_not_exist") @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_set_names(fast_reader): names = ("c1", "c2", "c3", "c4", "c5", "c6") data = ascii.read( "data/simple3.txt", names=names, delimiter="|", fast_reader=fast_reader ) assert_equal(data.dtype.names, names) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def 
test_set_include_names(fast_reader): names = ("c1", "c2", "c3", "c4", "c5", "c6") include_names = ("c1", "c3") data = ascii.read( "data/simple3.txt", names=names, include_names=include_names, delimiter="|", fast_reader=fast_reader, ) assert_equal(data.dtype.names, include_names) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_set_exclude_names(fast_reader): exclude_names = ("Y", "object") data = ascii.read( "data/simple3.txt", exclude_names=exclude_names, delimiter="|", fast_reader=fast_reader, ) assert_equal(data.dtype.names, ("obsid", "redshift", "X", "rad")) def test_include_names_daophot(): include_names = ("ID", "MAG", "PIER") data = ascii.read("data/daophot.dat", include_names=include_names) assert_equal(data.dtype.names, include_names) def test_exclude_names_daophot(): exclude_names = ("ID", "YCENTER", "MERR", "NITER", "CHI", "PERROR") data = ascii.read("data/daophot.dat", exclude_names=exclude_names) assert_equal(data.dtype.names, ("XCENTER", "MAG", "MSKY", "SHARPNESS", "PIER")) def test_custom_process_lines(): def process_lines(lines): bars_at_ends = re.compile(r"^\| | \|$", re.VERBOSE) striplines = (x.strip() for x in lines) return [bars_at_ends.sub("", x) for x in striplines if len(x) > 0] reader = ascii.get_reader(delimiter="|") reader.inputter.process_lines = process_lines data = reader.read("data/bars_at_ends.txt") assert_equal(data.dtype.names, ("obsid", "redshift", "X", "Y", "object", "rad")) assert_equal(len(data), 3) def test_custom_process_line(): def process_line(line): line_out = re.sub(r"^\|\s*", "", line.strip()) return line_out reader = ascii.get_reader(data_start=2, delimiter="|") reader.header.splitter.process_line = process_line reader.data.splitter.process_line = process_line data = reader.read("data/nls1_stackinfo.dbout") cols = get_testfiles("data/nls1_stackinfo.dbout")["cols"] assert_equal(data.dtype.names, cols[1:]) def test_custom_splitters(): reader = ascii.get_reader() reader.header.splitter = ascii.BaseSplitter() reader.data.splitter = ascii.BaseSplitter() f = "data/test4.dat" data = reader.read(f) testfile = get_testfiles(f) assert_equal(data.dtype.names, testfile["cols"]) assert_equal(len(data), testfile["nrows"]) assert_almost_equal(data.field("zabs1.nh")[2], 0.0839710433091) assert_almost_equal(data.field("p1.gamma")[2], 1.25997502704) assert_almost_equal(data.field("p1.ampl")[2], 0.000696444029148) assert_equal(data.field("statname")[2], "chi2modvar") assert_almost_equal(data.field("statval")[2], 497.56468441) def test_start_end(): data = ascii.read("data/test5.dat", header_start=1, data_start=3, data_end=-5) assert_equal(len(data), 13) assert_equal(data.field("statname")[0], "chi2xspecvar") assert_equal(data.field("statname")[-1], "chi2gehrels") def test_set_converters(): converters = { "zabs1.nh": [ascii.convert_numpy("int32"), ascii.convert_numpy("float32")], "p1.gamma": [ascii.convert_numpy("str")], } data = ascii.read("data/test4.dat", converters=converters) assert_equal(str(data["zabs1.nh"].dtype), "float32") assert_equal(data["p1.gamma"][0], "1.26764500000") @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_from_string(fast_reader): f = "data/simple.txt" with open(f) as fd: table = fd.read() testfile = get_testfiles(f)[0] data = ascii.read(table, fast_reader=fast_reader, **testfile["opts"]) assert_equal(data.dtype.names, testfile["cols"]) assert_equal(len(data), testfile["nrows"]) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_from_filelike(fast_reader): f = 
"data/simple.txt" testfile = get_testfiles(f)[0] with open(f, "rb") as fd: data = ascii.read(fd, fast_reader=fast_reader, **testfile["opts"]) assert_equal(data.dtype.names, testfile["cols"]) assert_equal(len(data), testfile["nrows"]) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_from_lines(fast_reader): f = "data/simple.txt" with open(f) as fd: table = fd.readlines() testfile = get_testfiles(f)[0] data = ascii.read(table, fast_reader=fast_reader, **testfile["opts"]) assert_equal(data.dtype.names, testfile["cols"]) assert_equal(len(data), testfile["nrows"]) def test_comment_lines(): table = ascii.get_reader(Reader=ascii.Rdb) data = table.read("data/apostrophe.rdb") assert_equal(table.comment_lines, ["# first comment", " # second comment"]) assert_equal(data.meta["comments"], ["first comment", "second comment"]) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_fill_values(fast_reader): f = "data/fill_values.txt" testfile = get_testfiles(f) data = ascii.read( f, fill_values=("a", "1"), fast_reader=fast_reader, **testfile["opts"] ) assert_true((data["a"].mask == [False, True]).all()) assert_true((data["a"] == [1, 1]).all()) assert_true((data["b"].mask == [False, True]).all()) assert_true((data["b"] == [2, 1]).all()) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_fill_values_col(fast_reader): f = "data/fill_values.txt" testfile = get_testfiles(f) data = ascii.read( f, fill_values=("a", "1", "b"), fast_reader=fast_reader, **testfile["opts"] ) check_fill_values(data) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_fill_values_include_names(fast_reader): f = "data/fill_values.txt" testfile = get_testfiles(f) data = ascii.read( f, fill_values=("a", "1"), fast_reader=fast_reader, fill_include_names=["b"], **testfile["opts"], ) check_fill_values(data) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_fill_values_exclude_names(fast_reader): f = "data/fill_values.txt" testfile = get_testfiles(f) data = ascii.read( f, fill_values=("a", "1"), fast_reader=fast_reader, fill_exclude_names=["a"], **testfile["opts"], ) check_fill_values(data) def check_fill_values(data): """compare array column by column with expectation""" assert not hasattr(data["a"], "mask") assert_true((data["a"] == ["1", "a"]).all()) assert_true((data["b"].mask == [False, True]).all()) # Check that masked value is "do not care" in comparison assert_true((data["b"] == [2, -999]).all()) data["b"].mask = False # explicitly unmask for comparison assert_true((data["b"] == [2, 1]).all()) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_fill_values_list(fast_reader): f = "data/fill_values.txt" testfile = get_testfiles(f) data = ascii.read( f, fill_values=[("a", "42"), ("1", "42", "a")], fast_reader=fast_reader, **testfile["opts"], ) data["a"].mask = False # explicitly unmask for comparison assert_true((data["a"] == [42, 42]).all()) def test_masking_Cds_Mrt(): f = "data/cds.dat" # Tested for CDS and MRT for testfile in get_testfiles(f): data = ascii.read(f, **testfile["opts"]) assert_true(data["AK"].mask[0]) assert not hasattr(data["Fit"], "mask") def test_null_Ipac(): f = "data/ipac.dat" testfile = get_testfiles(f)[0] data = ascii.read(f, **testfile["opts"]) mask = np.array( [(True, False, True, False, True), (False, False, False, False, False)], dtype=[ ("ra", "|b1"), ("dec", "|b1"), ("sai", "|b1"), ("v2", "|b1"), ("sptype", "|b1"), ], ) assert np.all(data.mask == mask) def test_Ipac_meta(): 
keywords = OrderedDict( ( ("intval", 1), ("floatval", 2.3e3), ("date", "Wed Sp 20 09:48:36 1995"), ("key_continue", "IPAC keywords can continue across lines"), ) ) comments = ["This is an example of a valid comment"] f = "data/ipac.dat" testfile = get_testfiles(f)[0] data = ascii.read(f, **testfile["opts"]) assert data.meta["keywords"].keys() == keywords.keys() for data_kv, kv in zip(data.meta["keywords"].values(), keywords.values()): assert data_kv["value"] == kv assert data.meta["comments"] == comments def test_set_guess_kwarg(): """Read a file using guess with one of the typical guess_kwargs explicitly set.""" data = ascii.read("data/space_delim_no_header.dat", delimiter=",", guess=True) assert data.dtype.names == ("1 3.4 hello",) assert len(data) == 1 @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_read_rdb_wrong_type(fast_reader): """Read RDB data with inconsistent data type (except failure)""" table = """col1\tcol2 N\tN 1\tHello""" with pytest.raises(ValueError): ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader) @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_default_missing(fast_reader): """ Read a table with empty values and ensure that corresponding entries are masked """ table = "\n".join( [ "a,b,c,d", "1,3,,", "2, , 4.0 , ss ", ] ) dat = ascii.read(table, fast_reader=fast_reader) assert dat.masked is False assert dat.pformat() == [ " a b c d ", "--- --- --- ---", " 1 3 -- --", " 2 -- 4.0 ss", ] # Single row table with a single missing element table = """ a \n "" """ dat = ascii.read(table, fast_reader=fast_reader) assert dat.pformat() == [" a ", "---", " --"] assert dat["a"].dtype.kind == "i" # Same test with a fixed width reader table = "\n".join( [ " a b c d ", "--- --- --- ---", " 1 3 ", " 2 4.0 ss", ] ) dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine) assert dat.masked is False assert dat.pformat() == [ " a b c d ", "--- --- --- ---", " 1 3 -- --", " 2 -- 4.0 ss", ] dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None) assert dat.masked is False assert dat.pformat() == [ " a b c d ", "--- --- --- ---", " 1 3 ", " 2 4.0 ss", ] dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[]) assert dat.masked is False assert dat.pformat() == [ " a b c d ", "--- --- --- ---", " 1 3 ", " 2 4.0 ss", ] def get_testfiles(name=None): """Set up information about the columns, number of rows, and reader params to read a bunch of test files and verify columns and number of rows.""" testfiles = [ { "cols": ("agasc_id", "n_noids", "n_obs"), "name": "data/apostrophe.rdb", "nrows": 2, "opts": {"Reader": ascii.Rdb}, }, { "cols": ("agasc_id", "n_noids", "n_obs"), "name": "data/apostrophe.tab", "nrows": 2, "opts": {"Reader": ascii.Tab}, }, { "cols": ( "Index", "RAh", "RAm", "RAs", "DE-", "DEd", "DEm", "DEs", "Match", "Class", "AK", "Fit", ), "name": "data/cds.dat", "nrows": 1, "opts": {"Reader": ascii.Cds}, }, { "cols": ( "Index", "RAh", "RAm", "RAs", "DE-", "DEd", "DEm", "DEs", "Match", "Class", "AK", "Fit", ), "name": "data/cds.dat", "nrows": 1, "opts": {"Reader": ascii.Mrt}, }, # Test malformed CDS file (issues #2241 #467) { "cols": ( "Index", "RAh", "RAm", "RAs", "DE-", "DEd", "DEm", "DEs", "Match", "Class", "AK", "Fit", ), "name": "data/cds_malformed.dat", "nrows": 1, "opts": {"Reader": ascii.Cds, "data_start": "guess"}, }, { "cols": ("a", "b", "c"), "name": "data/commented_header.dat", "nrows": 2, "opts": {"Reader": ascii.CommentedHeader}, }, { "cols": ("a", "b", "c"), "name": 
"data/commented_header2.dat", "nrows": 2, "opts": {"Reader": ascii.CommentedHeader, "header_start": -1}, }, { "cols": ("col1", "col2", "col3", "col4", "col5"), "name": "data/continuation.dat", "nrows": 2, "opts": { "Inputter": ascii.ContinuationLinesInputter, "Reader": ascii.NoHeader, }, }, { "cols": ( "ID", "XCENTER", "YCENTER", "MAG", "MERR", "MSKY", "NITER", "SHARPNESS", "CHI", "PIER", "PERROR", ), "name": "data/daophot.dat", "nrows": 2, "opts": {"Reader": ascii.Daophot}, }, { "cols": ( "NUMBER", "FLUX_ISO", "FLUXERR_ISO", "VALU-ES", "VALU-ES_1", "FLAG", ), "name": "data/sextractor.dat", "nrows": 3, "opts": {"Reader": ascii.SExtractor}, }, { "cols": ("ra", "dec", "sai", "v2", "sptype"), "name": "data/ipac.dat", "nrows": 2, "opts": {"Reader": ascii.Ipac}, }, { "cols": ( "col0", "objID", "osrcid", "xsrcid", "SpecObjID", "ra", "dec", "obsid", "ccdid", "z", "modelMag_i", "modelMagErr_i", "modelMag_r", "modelMagErr_r", "expo", "theta", "rad_ecf_39", "detlim90", "fBlim90", ), "name": "data/nls1_stackinfo.dbout", "nrows": 58, "opts": {"data_start": 2, "delimiter": "|", "guess": False}, }, { "cols": ( "Index", "RAh", "RAm", "RAs", "DE-", "DEd", "DEm", "DEs", "Match", "Class", "AK", "Fit", ), "name": "data/no_data_cds.dat", "nrows": 0, "opts": {"Reader": ascii.Cds}, }, { "cols": ( "Index", "RAh", "RAm", "RAs", "DE-", "DEd", "DEm", "DEs", "Match", "Class", "AK", "Fit", ), "name": "data/no_data_cds.dat", "nrows": 0, "opts": {"Reader": ascii.Mrt}, }, { "cols": ( "ID", "XCENTER", "YCENTER", "MAG", "MERR", "MSKY", "NITER", "SHARPNESS", "CHI", "PIER", "PERROR", ), "name": "data/no_data_daophot.dat", "nrows": 0, "opts": {"Reader": ascii.Daophot}, }, { "cols": ("NUMBER", "FLUX_ISO", "FLUXERR_ISO", "VALUES", "VALUES_1", "FLAG"), "name": "data/no_data_sextractor.dat", "nrows": 0, "opts": {"Reader": ascii.SExtractor}, }, { "cols": ("ra", "dec", "sai", "v2", "sptype"), "name": "data/no_data_ipac.dat", "nrows": 0, "opts": {"Reader": ascii.Ipac}, }, { "cols": ("ra", "v2"), "name": "data/ipac.dat", "nrows": 2, "opts": {"Reader": ascii.Ipac, "include_names": ["ra", "v2"]}, }, { "cols": ("a", "b", "c"), "name": "data/no_data_with_header.dat", "nrows": 0, "opts": {}, }, { "cols": ("agasc_id", "n_noids", "n_obs"), "name": "data/short.rdb", "nrows": 7, "opts": {"Reader": ascii.Rdb}, }, { "cols": ("agasc_id", "n_noids", "n_obs"), "name": "data/short.tab", "nrows": 7, "opts": {"Reader": ascii.Tab}, }, { "cols": ("test 1a", "test2", "test3", "test4"), "name": "data/simple.txt", "nrows": 2, "opts": {"quotechar": "'"}, }, { "cols": ("top1", "top2", "top3", "top4"), "name": "data/simple.txt", "nrows": 1, "opts": {"quotechar": "'", "header_start": 1, "data_start": 2}, }, { "cols": ("top1", "top2", "top3", "top4"), "name": "data/simple.txt", "nrows": 1, "opts": {"quotechar": "'", "header_start": 1}, }, { "cols": ("top1", "top2", "top3", "top4"), "name": "data/simple.txt", "nrows": 2, "opts": {"quotechar": "'", "header_start": 1, "data_start": 1}, }, { "cols": ("obsid", "redshift", "X", "Y", "object", "rad"), "name": "data/simple2.txt", "nrows": 3, "opts": {"delimiter": "|"}, }, { "cols": ("obsid", "redshift", "X", "Y", "object", "rad"), "name": "data/simple3.txt", "nrows": 2, "opts": {"delimiter": "|"}, }, { "cols": ("col1", "col2", "col3", "col4", "col5", "col6"), "name": "data/simple4.txt", "nrows": 3, "opts": {"Reader": ascii.NoHeader, "delimiter": "|"}, }, { "cols": ("col1", "col2", "col3"), "name": "data/space_delim_no_header.dat", "nrows": 2, "opts": {"Reader": ascii.NoHeader}, }, { "cols": ("col1", "col2", "col3"), 
"name": "data/space_delim_no_header.dat", "nrows": 2, "opts": {"Reader": ascii.NoHeader, "header_start": None}, }, { "cols": ("obsid", "offset", "x", "y", "name", "oaa"), "name": "data/space_delim_blank_lines.txt", "nrows": 3, "opts": {}, }, { "cols": ("zabs1.nh", "p1.gamma", "p1.ampl", "statname", "statval"), "name": "data/test4.dat", "nrows": 9, "opts": {}, }, { "cols": ("a", "b", "c"), "name": "data/fill_values.txt", "nrows": 2, "opts": {"delimiter": ","}, }, { "name": "data/whitespace.dat", "cols": ("quoted colname with tab\tinside", "col2", "col3"), "nrows": 2, "opts": {"delimiter": r"\s"}, }, { "name": "data/simple_csv.csv", "cols": ("a", "b", "c"), "nrows": 2, "opts": {"Reader": ascii.Csv}, }, { "name": "data/simple_csv_missing.csv", "cols": ("a", "b", "c"), "nrows": 2, "skip": True, "opts": {"Reader": ascii.Csv}, }, { "cols": ("cola", "colb", "colc"), "name": "data/latex1.tex", "nrows": 2, "opts": {"Reader": ascii.Latex}, }, { "cols": ("Facility", "Id", "exposure", "date"), "name": "data/latex2.tex", "nrows": 3, "opts": {"Reader": ascii.AASTex}, }, { "cols": ("cola", "colb", "colc"), "name": "data/latex3.tex", "nrows": 2, "opts": {"Reader": ascii.Latex}, }, { "cols": ("Col1", "Col2", "Col3", "Col4"), "name": "data/fixed_width_2_line.txt", "nrows": 2, "opts": {"Reader": ascii.FixedWidthTwoLine}, }, ] try: import bs4 # noqa: F401 testfiles.append( { "cols": ("Column 1", "Column 2", "Column 3"), "name": "data/html.html", "nrows": 3, "opts": {"Reader": ascii.HTML}, } ) except ImportError: pass if name is not None: # If there are multiple matches then return a list, else return just # the one match. out = [x for x in testfiles if x["name"] == name] if len(out) == 1: out = out[0] else: out = testfiles return out def test_header_start_exception(): """Check certain Readers throw an exception if ``header_start`` is set For certain Readers it does not make sense to set the ``header_start``, they throw an exception if you try. This was implemented in response to issue #885. """ for readerclass in [ ascii.NoHeader, ascii.SExtractor, ascii.Ipac, ascii.BaseReader, ascii.FixedWidthNoHeader, ascii.Cds, ascii.Mrt, ascii.Daophot, ]: with pytest.raises(ValueError): ascii.core._get_reader(readerclass, header_start=5) def test_csv_table_read(): """ Check for a regression introduced by #1935. Pseudo-CSV file with commented header line. """ lines = ["# a, b", "1, 2", "3, 4"] t = ascii.read(lines) assert t.colnames == ["a", "b"] @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_overlapping_names(fast_reader): """ Check that the names argument list can overlap with the existing column names. This tests the issue in #1991. """ t = ascii.read(["a b", "1 2"], names=["b", "a"], fast_reader=fast_reader) assert t.colnames == ["b", "a"] def test_sextractor_units(): """ Make sure that the SExtractor reader correctly inputs descriptions and units. 
""" table = ascii.read("data/sextractor2.dat", Reader=ascii.SExtractor, guess=False) expected_units = [ None, Unit("pix"), Unit("pix"), Unit("mag"), Unit("mag"), None, Unit("pix**2"), Unit("m**(-6)"), Unit("mag * arcsec**(-2)"), ] expected_descrs = [ "Running object number", "Windowed position estimate along x", "Windowed position estimate along y", "Kron-like elliptical aperture magnitude", "RMS error for AUTO magnitude", "Extraction flags", None, "Barycenter position along MAMA x axis", "Peak surface brightness above background", ] for i, colname in enumerate(table.colnames): assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_sextractor_last_column_array(): """ Make sure that the SExtractor reader handles the last column correctly when it is array-like. """ table = ascii.read("data/sextractor3.dat", Reader=ascii.SExtractor, guess=False) expected_columns = [ "X_IMAGE", "Y_IMAGE", "ALPHA_J2000", "DELTA_J2000", "MAG_AUTO", "MAGERR_AUTO", "MAG_APER", "MAG_APER_1", "MAG_APER_2", "MAG_APER_3", "MAG_APER_4", "MAG_APER_5", "MAG_APER_6", "MAGERR_APER", "MAGERR_APER_1", "MAGERR_APER_2", "MAGERR_APER_3", "MAGERR_APER_4", "MAGERR_APER_5", "MAGERR_APER_6", ] expected_units = [ Unit("pix"), Unit("pix"), Unit("deg"), Unit("deg"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), Unit("mag"), ] expected_descrs = ( [ "Object position along x", None, "Right ascension of barycenter (J2000)", "Declination of barycenter (J2000)", "Kron-like elliptical aperture magnitude", "RMS error for AUTO magnitude", ] + ["Fixed aperture magnitude vector"] * 7 + ["RMS error vector for fixed aperture mag."] * 7 ) for i, colname in enumerate(table.colnames): assert table[colname].name == expected_columns[i] assert table[colname].unit == expected_units[i] assert table[colname].description == expected_descrs[i] def test_list_with_newlines(): """ Check that lists of strings where some strings consist of just a newline ("\n") are parsed correctly. """ t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"]) assert t.colnames == ["abc"] assert len(t) == 2 assert t[0][0] == 123 assert t[1][0] == 456 def test_commented_csv(): """ Check that Csv reader does not have ignore lines with the # comment character which is defined for most Basic readers. """ t = ascii.read(["#a,b", "1,2", "#3,4"], format="csv") assert t.colnames == ["#a", "b"] assert len(t) == 2 assert t["#a"][1] == "#3" def test_meta_comments(): """ Make sure that line comments are included in the ``meta`` attribute of the output Table. 
""" t = ascii.read(["#comment1", "# comment2 \t", "a,b,c", "1,2,3"]) assert t.colnames == ["a", "b", "c"] assert t.meta["comments"] == ["comment1", "comment2"] def test_guess_fail(): """ Check the error message when guess fails """ with pytest.raises(ascii.InconsistentTableError) as err: ascii.read("asfdasdf\n1 2 3", format="basic") assert "** To figure out why the table did not read, use guess=False and" in str( err.value ) # Test the case with guessing enabled but for a format that has no free params with pytest.raises(ValueError) as err: ascii.read("asfdasdf\n1 2 3", format="ipac") assert ( "At least one header line beginning and ending with delimiter required" in str(err.value) ) # Test the case with guessing enabled but with all params specified with pytest.raises(ValueError) as err: ascii.read( "asfdasdf\n1 2 3", format="basic", quotechar='"', delimiter=" ", fast_reader=False, ) assert "Number of header columns (1) inconsistent with data columns (3)" in str( err.value ) @pytest.mark.xfail(not HAS_BZ2, reason="requires bz2") def test_guessing_file_object(): """ Test guessing a file object. Fixes #3013 and similar issue noted in #3019. """ with open("data/ipac.dat.bz2", "rb") as fd: t = ascii.read(fd) assert t.colnames == ["ra", "dec", "sai", "v2", "sptype"] def test_pformat_roundtrip(): """Check that the screen output of ``print tab`` can be read. See #3025.""" """Read a table with empty values and ensure that corresponding entries are masked""" table = "\n".join(["a,b,c,d", "1,3,1.11,1", "2, 2, 4.0 , ss "]) dat = ascii.read(table) out = ascii.read(dat.pformat()) assert len(dat) == len(out) assert dat.colnames == out.colnames for c in dat.colnames: assert np.all(dat[c] == out[c]) def test_ipac_abbrev(): lines = [ "| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|", "| r | rE | rea | real | D | do | dou | f | i | l | da| c |", " 1 2 3 4 5 6 7 8 9 10 11 12 ", ] dat = ascii.read(lines, format="ipac") for name in dat.columns[0:8]: assert dat[name].dtype.kind == "f" for name in dat.columns[8:10]: assert dat[name].dtype.kind == "i" for name in dat.columns[10:12]: assert dat[name].dtype.kind in ("U", "S") def test_almost_but_not_quite_daophot(): """Regression test for #3319. This tables looks so close to a daophot table, that the daophot reader gets quite far before it fails with an AttributeError. Note that this table will actually be read as Commented Header table with the columns ['some', 'header', 'info']. """ lines = [ "# some header info", "#F header info beginning with 'F'", "1 2 3", "4 5 6", "7 8 9", ] dat = ascii.read(lines) assert len(dat) == 3 @pytest.mark.parametrize("fast", [False, "force"]) def test_commented_header_comments(fast): """ Test that comments in commented_header are as expected with header_start at different positions, and that the table round-trips. 
""" comments = ["comment 1", "comment 2", "comment 3"] lines = ["# a b", "# comment 1", "# comment 2", "# comment 3", "1 2", "3 4"] dat = ascii.read(lines, format="commented_header", fast_reader=fast) assert dat.meta["comments"] == comments assert dat.colnames == ["a", "b"] out = StringIO() ascii.write(dat, out, format="commented_header", fast_writer=fast) assert out.getvalue().splitlines() == lines lines.insert(1, lines.pop(0)) dat = ascii.read(lines, format="commented_header", header_start=1, fast_reader=fast) assert dat.meta["comments"] == comments assert dat.colnames == ["a", "b"] lines.insert(2, lines.pop(1)) dat = ascii.read(lines, format="commented_header", header_start=2, fast_reader=fast) assert dat.meta["comments"] == comments assert dat.colnames == ["a", "b"] dat = ascii.read( lines, format="commented_header", header_start=-2, fast_reader=fast ) assert dat.meta["comments"] == comments assert dat.colnames == ["a", "b"] lines.insert(3, lines.pop(2)) dat = ascii.read( lines, format="commented_header", header_start=-1, fast_reader=fast ) assert dat.meta["comments"] == comments assert dat.colnames == ["a", "b"] lines = ["# a b", "1 2", "3 4"] dat = ascii.read(lines, format="commented_header", fast_reader=fast) assert "comments" not in dat.meta assert dat.colnames == ["a", "b"] def test_probably_html(home_is_data): """ Test the routine for guessing if a table input to ascii.read is probably HTML """ for tabl0 in ( "data/html.html", "~/html.html", "http://blah.com/table.html", "https://blah.com/table.html", "file://blah/table.htm", "ftp://blah.com/table.html", "file://blah.com/table.htm", " <! doctype html > hello world", "junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk", [ "junk < table baz>", " <tr foo >", " <td bar> ", "</td> </tr>", "</table> junk", ], (" <! doctype html > ", " hello world"), ): assert _probably_html(tabl0) is True for tabl0 in ( "data/html.htms", "Xhttp://blah.com/table.html", " https://blah.com/table.htm", "fole://blah/table.htm", " < doctype html > hello world", "junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk", [ "junk < table baz>", " <t foo >", " <td bar> ", "</td> </tr>", "</table> junk", ], (" <! doctype htm > ", " hello world"), [[1, 2, 3]], ): assert _probably_html(tabl0) is False @pytest.mark.parametrize("fast_reader", [True, False, "force"]) def test_data_header_start(fast_reader): tests = [ ( [ "# comment", "", " ", "skip this line", # line 0 "a b", # line 1 "1 2", ], # line 2 [{"header_start": 1}, {"header_start": 1, "data_start": 2}], ), ( [ "# comment", "", " \t", "skip this line", # line 0 "a b", # line 1 "", " \t", "skip this line", # line 2 "1 2", ], # line 3 [{"header_start": 1, "data_start": 3}], ), ( [ "# comment", "", " ", "a b", # line 0 "", " ", "skip this line", # line 1 "1 2", ], # line 2 [{"header_start": 0, "data_start": 2}, {"data_start": 2}], ), ] for lines, kwargs_list in tests: for kwargs in kwargs_list: t = ascii.read( lines, format="basic", fast_reader=fast_reader, guess=True, **kwargs ) assert t.colnames == ["a", "b"] assert len(t) == 1 assert np.all(t["a"] == [1]) # Sanity check that the expected Reader is being used assert get_read_trace()[-1]["kwargs"]["Reader"] is ( ascii.Basic if (fast_reader is False) else ascii.FastBasic ) def test_table_with_no_newline(): """ Test that an input file which is completely empty fails in the expected way. Test that an input file with one line but no newline succeeds. 
""" # With guessing table = BytesIO() with pytest.raises(ascii.InconsistentTableError): ascii.read(table) # Without guessing table = BytesIO() with pytest.raises(ValueError) as err: ascii.read(table, guess=False, fast_reader=False, format="basic") assert "No header line found" in str(err.value) table = BytesIO() t = ascii.read(table, guess=False, fast_reader=True, format="fast_basic") assert not t and t.as_array().size == 0 # Put a single line of column names but with no newline for kwargs in [ dict(), dict(guess=False, fast_reader=False, format="basic"), dict(guess=False, fast_reader=True, format="fast_basic"), ]: table = BytesIO() table.write(b"a b") t = ascii.read(table, **kwargs) assert t.colnames == ["a", "b"] assert len(t) == 0 def test_path_object(): fpath = pathlib.Path("data/simple.txt") data = ascii.read(fpath) assert len(data) == 2 assert sorted(list(data.columns)) == ["test 1a", "test2", "test3", "test4"] assert data["test2"][1] == "hat2" def test_column_conversion_error(): """ Test that context information (upstream exception message) from column conversion error is provided. """ ipac = """\ | col0 | | double | 1 2 """ with pytest.raises(ValueError) as err: ascii.read(ipac, guess=False, format="ipac") assert "Column col0 failed to convert:" in str(err.value) with pytest.raises(ValueError) as err: ascii.read(["a b", "1 2"], guess=False, format="basic", converters={"a": []}) assert "no converters" in str(err.value) def test_non_C_locale_with_fast_reader(): """Test code that forces "C" locale while calling fast reader (#4364)""" current = locale.setlocale(locale.LC_ALL) try: if platform.system() == "Darwin": locale.setlocale(locale.LC_ALL, "fr_FR") else: locale.setlocale(locale.LC_ALL, "fr_FR.utf8") for fast_reader in ( True, False, {"use_fast_converter": False}, {"use_fast_converter": True}, ): t = ascii.read( ["a b", "1.5 2"], format="basic", guess=False, fast_reader=fast_reader ) assert t["a"].dtype.kind == "f" except locale.Error as e: pytest.skip(f"Locale error: {e}") finally: locale.setlocale(locale.LC_ALL, current) def test_no_units_for_char_columns(): """Test that a char column of a Table is assigned no unit and not a dimensionless unit.""" t1 = Table([["A"]], names="B") out = StringIO() ascii.write(t1, out, format="ipac") t2 = ascii.read(out.getvalue(), format="ipac", guess=False) assert t2["B"].unit is None def test_initial_column_fill_values(): """Regression test for #5336, #5338.""" class TestHeader(ascii.BasicHeader): def _set_cols_from_names(self): self.cols = [ascii.Column(name=x) for x in self.names] # Set some initial fill values for col in self.cols: col.fill_values = {"--": "0"} class Tester(ascii.Basic): header_class = TestHeader reader = ascii.get_reader(Reader=Tester) assert ( reader.read( """# Column definition is the first uncommented line # Default delimiter is the space character. a b c # Data starts after the header column definition, blank lines ignored -- 2 3 4 5 6 """ )["a"][0] is np.ma.masked ) def test_latex_no_trailing_backslash(): """ Test that latex/aastex file with no trailing backslash can be read. 
""" lines = r""" \begin{table} \begin{tabular}{ccc} a & b & c \\ 1 & 1.0 & c \\ % comment 3\% & 3.0 & e % comment \end{tabular} \end{table} """ dat = ascii.read(lines, format="latex") assert dat.colnames == ["a", "b", "c"] assert np.all(dat["a"] == ["1", r"3\%"]) assert np.all(dat["c"] == ["c", "e"]) def text_aastex_no_trailing_backslash(): lines = r""" \begin{deluxetable}{ccc} \tablehead{\colhead{a} & \colhead{b} & \colhead{c}} \startdata 1 & 1.0 & c \\ 2 & 2.0 & d \\ % comment 3\% & 3.0 & e % comment \enddata \end{deluxetable} """ dat = ascii.read(lines, format="aastex") assert dat.colnames == ["a", "b", "c"] assert np.all(dat["a"] == ["1", r"3\%"]) assert np.all(dat["c"] == ["c", "e"]) @pytest.mark.parametrize("encoding", ["utf8", "latin1", "cp1252"]) def test_read_with_encoding(tmp_path, encoding): data = {"commented_header": "# à b è \n 1 2 héllo", "csv": "à,b,è\n1,2,héllo"} testfile = tmp_path / "test.txt" for fmt, content in data.items(): with open(testfile, "w", encoding=encoding) as f: f.write(content) table = ascii.read(testfile, encoding=encoding) assert table.pformat() == [" à b è ", "--- --- -----", " 1 2 héllo"] for guess in (True, False): table = ascii.read( testfile, format=fmt, fast_reader=False, encoding=encoding, guess=guess ) assert table["è"].dtype.kind == "U" assert table.pformat() == [ " à b è ", "--- --- -----", " 1 2 héllo", ] def test_unsupported_read_with_encoding(): # Fast reader is not supported, make sure it raises an exception with pytest.raises(ascii.ParameterError): ascii.read( "data/simple3.txt", guess=False, fast_reader="force", encoding="latin1", format="fast_csv", ) def test_read_chunks_input_types(): """ Test chunked reading for different input types: file path, file object, and string input. """ fpath = "data/test5.dat" t1 = ascii.read( fpath, header_start=1, data_start=3, ) with open(fpath) as fd1, open(fpath) as fd2: for fp in (fpath, fd1, fd2.read()): t_gen = ascii.read( fp, header_start=1, data_start=3, guess=False, format="fast_basic", fast_reader={"chunk_size": 400, "chunk_generator": True}, ) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) == 4 t2 = table.vstack(ts) assert np.all(t1 == t2) with open(fpath) as fd1, open(fpath) as fd2: for fp in (fpath, fd1, fd2.read()): # Now read the full table in chunks t3 = ascii.read( fp, header_start=1, data_start=3, fast_reader={"chunk_size": 300} ) assert np.all(t1 == t3) @pytest.mark.parametrize("masked", [True, False]) def test_read_chunks_formats(masked): """ Test different supported formats for chunked reading. """ t1 = simple_table(size=102, cols=10, kinds="fS", masked=masked) for i, name in enumerate(t1.colnames): t1.rename_column(name, f"col{i + 1}") # TO DO commented_header does not currently work due to the special-cased # implementation of header parsing. 
for format in "tab", "csv", "no_header", "rdb", "basic": out = StringIO() ascii.write(t1, out, format=format) t_gen = ascii.read( out.getvalue(), format=format, fast_reader={"chunk_size": 400, "chunk_generator": True}, ) ts = list(t_gen) for t in ts: for col, col1 in zip(t.columns.values(), t1.columns.values()): assert col.name == col1.name assert col.dtype.kind == col1.dtype.kind assert len(ts) > 4 t2 = table.vstack(ts) assert np.all(t1 == t2) # Now read the full table in chunks t3 = ascii.read(out.getvalue(), format=format, fast_reader={"chunk_size": 400}) assert np.all(t1 == t3) def test_read_chunks_chunk_size_too_small(): fpath = "data/test5.dat" with pytest.raises(ValueError) as err: ascii.read(fpath, header_start=1, data_start=3, fast_reader={"chunk_size": 10}) assert "no newline found in chunk (chunk_size too small?)" in str(err.value) def test_read_chunks_table_changes(): """Column changes type or size between chunks. This also tests the case with no final newline. """ col = ["a b c"] + ["1.12334 xyz a"] * 50 + ["abcdefg 555 abc"] * 50 table = "\n".join(col) t1 = ascii.read(table, guess=False) t2 = ascii.read(table, fast_reader={"chunk_size": 100}) # This also confirms that the dtypes are exactly the same, i.e. # the string itemsizes are the same. assert np.all(t1 == t2) def test_read_non_ascii(): """Test that pure-Python reader is used in case the file contains non-ASCII characters in it. """ table = Table.read(["col1, col2", "\u2119, \u01b4", "1, 2"], format="csv") assert np.all(table["col1"] == ["\u2119", "1"]) assert np.all(table["col2"] == ["\u01b4", "2"]) @pytest.mark.parametrize("enable", [True, False, "force"]) def test_kwargs_dict_guess(enable): """Test that fast_reader dictionary is preserved through guessing sequence.""" # Fails for enable=(True, 'force') - #5578 ascii.read("a\tb\n 1\t2\n3\t 4.0", fast_reader=dict(enable=enable)) assert get_read_trace()[-1]["kwargs"]["Reader"] is ( ascii.Tab if (enable is False) else ascii.FastTab ) for k in get_read_trace(): if not k.get("status", "Disabled").startswith("Disabled"): assert k.get("kwargs").get("fast_reader").get("enable") is enable def _get_lines(rdb): lines = ["a a_2 a_1 a a"] if rdb: lines += ["N N N N N"] lines += ["1 2 3 4 5", "10 20 30 40 50"] if rdb: lines = ["\t".join(line.split()) for line in lines] return lines @pytest.mark.parametrize("rdb", [False, True]) @pytest.mark.parametrize("fast_reader", [False, "force"]) def test_deduplicate_names_basic(rdb, fast_reader): """Test that duplicate column names are successfully de-duplicated for the basic format. Skip the case of rdb=True and fast_reader='force' when selecting include_names, since that fails and is tested below. 
""" lines = _get_lines(rdb) dat = ascii.read(lines, fast_reader=fast_reader) assert dat.colnames == ["a", "a_2", "a_1", "a_3", "a_4"] assert len(dat) == 2 dat = ascii.read(lines, fast_reader=fast_reader, include_names=["a", "a_2", "a_3"]) assert len(dat) == 2 assert dat.colnames == ["a", "a_2", "a_3"] assert np.all(dat["a"] == [1, 10]) assert np.all(dat["a_2"] == [2, 20]) assert np.all(dat["a_3"] == [4, 40]) dat = ascii.read( lines, fast_reader=fast_reader, names=["b1", "b2", "b3", "b4", "b5"], include_names=["b1", "b2", "a_4", "b4"], ) assert len(dat) == 2 assert dat.colnames == ["b1", "b2", "b4"] assert np.all(dat["b1"] == [1, 10]) assert np.all(dat["b2"] == [2, 20]) assert np.all(dat["b4"] == [4, 40]) dat = ascii.read( lines, fast_reader=fast_reader, names=["b1", "b2", "b3", "b4", "b5"], exclude_names=["b3", "b5", "a_3", "a_4"], ) assert len(dat) == 2 assert dat.colnames == ["b1", "b2", "b4"] assert np.all(dat["b1"] == [1, 10]) assert np.all(dat["b2"] == [2, 20]) assert np.all(dat["b4"] == [4, 40]) def test_include_names_rdb_fast(): """Test that selecting column names via `include_names` works for the RDB format with fast reader. This is testing the fix for a bug identified in #9939. """ lines = _get_lines(True) lines[0] = "a\ta_2\ta_1\ta_3\ta_4" dat = ascii.read(lines, fast_reader="force", include_names=["a", "a_2", "a_3"]) assert len(dat) == 2 assert dat["a"].dtype == int assert dat["a_2"].dtype == int @pytest.mark.parametrize("fast_reader", [False, "force"]) def test_deduplicate_names_with_types(fast_reader): """Test that on selecting column names via `include_names` in the RDB format with different types and duplicate column names type assignment is correctly preserved. """ lines = _get_lines(True) lines[1] = "N\tN\tN\tS\tS" dat = ascii.read(lines, fast_reader=fast_reader, include_names=["a", "a_2", "a_3"]) assert len(dat) == 2 assert dat["a_2"].dtype.kind == "i" assert dat["a_3"].dtype.kind == "U" dat = ascii.read( lines, fast_reader=fast_reader, names=["b1", "b2", "b3", "b4", "b5"], include_names=["a1", "a_2", "b1", "b2", "b4"], ) assert len(dat) == 2 assert dat.colnames == ["b1", "b2", "b4"] assert dat["b2"].dtype.kind == "i" assert dat["b4"].dtype.kind == "U" @pytest.mark.parametrize("rdb", [False, True]) @pytest.mark.parametrize("fast_reader", [False, "force"]) def test_set_invalid_names(rdb, fast_reader): """ Test exceptions for invalid (duplicate or `None`) names specified via argument. 
""" lines = _get_lines(rdb) if rdb: fmt = "rdb" else: fmt = "basic" with pytest.raises(ValueError) as err: ascii.read( lines, fast_reader=fast_reader, format=fmt, guess=rdb, names=["b1", "b2", "b1", "b4", "b5"], ) assert "Duplicate column names" in str(err.value) with pytest.raises(TypeError) as err: ascii.read( lines, fast_reader=fast_reader, format=fmt, guess=rdb, names=["b1", "b2", "b1", None, None], ) assert "Cannot have None for column name" in str(err.value) def test_read_masked_bool(): txt = """\ col0 col1 1 1 0 2 True 3 "" 4 False 5 """ # Reading without converters returns col0 as a string dat = ascii.read(txt, format="basic") col = dat["col0"] assert isinstance(col, MaskedColumn) assert col.dtype.kind == "U" assert col[0] == "1" # Force col0 to be read as bool converters = {"col0": [convert_numpy(bool)]} dat = ascii.read(txt, format="basic", converters=converters) col = dat["col0"] assert isinstance(col, MaskedColumn) assert col.dtype.kind == "b" assert np.all(col.mask == [False, False, False, True, False]) assert np.all(col == [True, False, True, False, False]) def test_read_converters_wildcard(): """Test converters where the column name is specified with a wildcard. """ converters = {"F*": [ascii.convert_numpy(np.float32)]} t = ascii.read(["Fabc Iabc", "1 2"], converters=converters) assert np.issubdtype(t["Fabc"].dtype, np.float32) assert not np.issubdtype(t["Iabc"].dtype, np.float32) def test_read_converters_simplified(): """Test providing io.ascii read converters as type or dtypes instead of convert_numpy(type) outputs""" t = Table() t["a"] = [1, 2] t["b"] = [3.5, 4] t["c"] = ["True", "False"] t["d"] = ["true", "false"] # Looks kindof like boolean but actually a string t["e"] = [5, 6] out = StringIO() t.write(out, format="ascii.basic") converters = {"a": str, "e": np.float32} t2 = Table.read(out.getvalue(), format="ascii.basic", converters=converters) assert t2.pformat(show_dtype=True) == [ " a b c d e ", "str1 float64 str5 str5 float32", "---- ------- ----- ----- -------", " 1 3.5 True true 5.0", " 2 4.0 False false 6.0", ] converters = {"a": float, "*": [np.int64, float, bool, str]} t2 = Table.read(out.getvalue(), format="ascii.basic", converters=converters) assert t2.pformat_all(show_dtype=True) == [ " a b c d e ", "float64 float64 bool str5 int64", "------- ------- ----- ----- -----", " 1.0 3.5 True true 5", " 2.0 4.0 False false 6", ] # Test failures for converters in ( {"*": [int, 1, bool, str]}, # bad converter type # Tuple converter where 2nd element is not a subclass of NoType {"a": [(int, int)]}, # Tuple converter with 3 elements not 2 {"a": [(int, int, int)]}, ): with pytest.raises(ValueError, match="Error: invalid format for converters"): t2 = Table.read( out.getvalue(), format="ascii.basic", converters=converters, guess=False )
ce2857974461da5653be496c34dee5bdf51dbb17b73611cff4961f63ea2ece47
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from astropy import units as u from astropy.io import ascii from .common import setup_function # noqa: F401 from .common import teardown_function # noqa: F401 from .common import assert_almost_equal, assert_equal def read_table1(readme, data): reader = ascii.Cds(readme) return reader.read(data) def read_table2(readme, data): reader = ascii.get_reader(Reader=ascii.Cds, readme=readme) reader.outputter = ascii.TableOutputter() return reader.read(data) def read_table3(readme, data): return ascii.read(data, readme=readme) def test_description(): readme = "data/cds/description/ReadMe" data = "data/cds/description/table.dat" for read_table in (read_table1, read_table2, read_table3): table = read_table(readme, data) assert_equal(len(table), 2) assert_equal(table["Cluster"].description, "Cluster name") assert_equal(table["Star"].description, "") assert_equal(table["Wave"].description, "wave? Wavelength in Angstroms") assert_equal(table["El"].description, "a") assert_equal( table["ion"].description, "- Ionization stage (1 for neutral element)" ) assert_equal(table["EW"].description, "Equivalent width (in mA)") assert_equal( table["Q"].description, "DAOSPEC quality parameter Q(large values are bad)" ) def test_multi_header(): readme = "data/cds/multi/ReadMe" data = "data/cds/multi/lhs2065.dat" for read_table in (read_table1, read_table2, read_table3): table = read_table(readme, data) assert_equal(len(table), 18) assert_almost_equal(table["Lambda"][-1], 6479.32) assert_equal(table["Fnu"][-1], "0.285937") data = "data/cds/multi/lp944-20.dat" for read_table in (read_table1, read_table2, read_table3): table = read_table(readme, data) assert_equal(len(table), 18) assert_almost_equal(table["Lambda"][0], 6476.09) assert_equal(table["Fnu"][-1], "0.489005") def test_glob_header(): readme = "data/cds/glob/ReadMe" data = "data/cds/glob/lmxbrefs.dat" for read_table in (read_table1, read_table2, read_table3): table = read_table(readme, data) assert_equal(len(table), 291) assert_equal(table["Name"][-1], "J1914+0953") assert_equal(table["BibCode"][-2], "2005A&A...432..235R") def test_header_from_readme(): r = ascii.Cds("data/vizier/ReadMe") table = r.read("data/vizier/table1.dat") assert len(r.data.data_lines) == 15 assert len(table) == 15 assert len(table.keys()) == 18 Bmag = [ 14.79, 15.00, 14.80, 12.38, 12.36, 12.24, 13.75, 13.65, 13.41, 11.59, 11.68, 11.53, 13.92, 14.03, 14.18, ] for i, val in enumerate(table.field("Bmag")): assert val == Bmag[i] table = r.read("data/vizier/table5.dat") assert len(r.data.data_lines) == 49 assert len(table) == 49 assert len(table.keys()) == 10 Q = [ 0.289, 0.325, 0.510, 0.577, 0.539, 0.390, 0.957, 0.736, 1.435, 1.117, 1.473, 0.808, 1.416, 2.209, 0.617, 1.046, 1.604, 1.419, 1.431, 1.183, 1.210, 1.005, 0.706, 0.665, 0.340, 0.323, 0.391, 0.280, 0.343, 0.369, 0.495, 0.828, 1.113, 0.499, 1.038, 0.260, 0.863, 1.638, 0.479, 0.232, 0.627, 0.671, 0.371, 0.851, 0.607, -9.999, 1.958, 1.416, 0.949, ] for i, val in enumerate(table.field("Q")): if val is np.ma.masked: # text value for a missing value in that table assert Q[i] == -9.999 else: assert val == Q[i] @pytest.mark.parametrize("reader_cls", (ascii.Cds, ascii.Mrt)) def test_cds_units(reader_cls): from astropy import units data_and_readme = "data/cds.dat" reader = ascii.get_reader(reader_cls) table = reader.read(data_and_readme) # column unit is GMsun (giga solar masses) # make sure this is parsed correctly, not as a "string" unit assert 
table["Fit"].to(units.solMass).unit == units.solMass @pytest.mark.parametrize("reader_cls", (ascii.Cds, ascii.Mrt)) def test_cds_function_units(reader_cls): data_and_readme = "data/cdsFunctional.dat" reader = ascii.get_reader(reader_cls) table = reader.read(data_and_readme) assert table["logg"].unit == u.dex(u.cm / u.s**2) assert table["logTe"].unit == u.dex(u.K) assert table["Mass"].unit == u.Msun assert table["e_Mass"].unit == u.Msun assert table["Age"].unit == u.Myr assert table["e_Age"].unit == u.Myr @pytest.mark.parametrize("reader_cls", (ascii.Cds, ascii.Mrt)) def test_cds_function_units2(reader_cls): # This one includes some dimensionless dex. data_and_readme = "data/cdsFunctional2.dat" reader = ascii.get_reader(reader_cls) table = reader.read(data_and_readme) assert table["Teff"].unit == u.K assert table["logg"].unit == u.dex(u.cm / u.s**2) assert table["vturb"].unit == u.km / u.s assert table["[Fe/H]"].unit == u.dex(u.one) assert table["e_[Fe/H]"].unit == u.dex(u.one) assert_almost_equal( table["[Fe/H]"].to(u.one), 10.0 ** (np.array([-2.07, -1.50, -2.11, -1.64])) ) def test_cds_ignore_nullable(): # Make sure CDS Reader does not ignore nullabilty for columns # with a limit specifier readme = "data/cds/null/ReadMe" data = "data/cds/null/table.dat" r = ascii.Cds(readme) r.read(data) assert_equal(r.header.cols[6].description, "Temperature class codified (10)") assert_equal(r.header.cols[8].description, "Luminosity class codified (11)") assert_equal(r.header.cols[5].description, "Pericenter position angle (18)") def test_cds_no_whitespace(): # Make sure CDS Reader only checks null values when an '=' symbol is present, # and read description text even if there is no whitespace after '?'. readme = "data/cds/null/ReadMe1" data = "data/cds/null/table.dat" r = ascii.Cds(readme) r.read(data) assert_equal(r.header.cols[6].description, "Temperature class codified (10)") assert_equal(r.header.cols[6].null, "") assert_equal(r.header.cols[7].description, "Equivalent width (in mA)") assert_equal(r.header.cols[7].null, "-9.9") assert_equal( r.header.cols[10].description, "DAOSPEC quality parameter Q(large values are bad)", ) assert_equal(r.header.cols[10].null, "-9.999") def test_cds_order(): # Make sure CDS Reader does not ignore order specifier that maybe present after # the null specifier '?' readme = "data/cds/null/ReadMe1" data = "data/cds/null/table.dat" r = ascii.Cds(readme) r.read(data) assert_equal(r.header.cols[5].description, "Catalogue Identification Number") assert_equal(r.header.cols[8].description, "Equivalent width (in mA)") assert_equal(r.header.cols[9].description, "Luminosity class codified (11)") if __name__ == "__main__": # run from main directory; not from test/ test_header_from_readme() test_multi_header() test_glob_header() test_description() test_cds_units() test_cds_ignore_nullable() test_cds_no_whitespace() test_cds_order()
eb07c62c45f3410c4119867211be9f16a979db2d7d80a9c47a69ef0ec1ef767e
# Licensed under a 3-clause BSD style license - see LICENSE.rst from io import StringIO import numpy as np import astropy.units as u from astropy.io import ascii from astropy.table import QTable from .common import assert_almost_equal, assert_equal def assert_equal_splitlines(arg1, arg2): assert_equal(arg1.splitlines(), arg2.splitlines()) def test_read_normal(): """Normal SimpleRST Table""" table = """ # comment (with blank line above) ======= ========= Col1 Col2 ======= ========= 1.2 "hello" 2.4 's worlds ======= ========= """ reader = ascii.get_reader(Reader=ascii.RST) dat = reader.read(table) assert_equal(dat.colnames, ["Col1", "Col2"]) assert_almost_equal(dat[1][0], 2.4) assert_equal(dat[0][1], '"hello"') assert_equal(dat[1][1], "'s worlds") def test_read_normal_names(): """Normal SimpleRST Table with provided column names""" table = """ # comment (with blank line above) ======= ========= Col1 Col2 ======= ========= 1.2 "hello" 2.4 's worlds ======= ========= """ reader = ascii.get_reader(Reader=ascii.RST, names=("name1", "name2")) dat = reader.read(table) assert_equal(dat.colnames, ["name1", "name2"]) assert_almost_equal(dat[1][0], 2.4) def test_read_normal_names_include(): """Normal SimpleRST Table with provided column names""" table = """ # comment (with blank line above) ======= ========== ====== Col1 Col2 Col3 ======= ========== ====== 1.2 "hello" 3 2.4 's worlds 7 ======= ========== ====== """ reader = ascii.get_reader( Reader=ascii.RST, names=("name1", "name2", "name3"), include_names=("name1", "name3"), ) dat = reader.read(table) assert_equal(dat.colnames, ["name1", "name3"]) assert_almost_equal(dat[1][0], 2.4) assert_equal(dat[0][1], 3) def test_read_normal_exclude(): """Nice, typical SimpleRST table with col name excluded""" table = """ ======= ========== Col1 Col2 ======= ========== 1.2 "hello" 2.4 's worlds ======= ========== """ reader = ascii.get_reader(Reader=ascii.RST, exclude_names=("Col1",)) dat = reader.read(table) assert_equal(dat.colnames, ["Col2"]) assert_equal(dat[1][0], "'s worlds") def test_read_unbounded_right_column(): """The right hand column should be allowed to overflow""" table = """ # comment (with blank line above) ===== ===== ==== Col1 Col2 Col3 ===== ===== ==== 1.2 2 Hello 2.4 4 Worlds ===== ===== ==== """ reader = ascii.get_reader(Reader=ascii.RST) dat = reader.read(table) assert_equal(dat[0][2], "Hello") assert_equal(dat[1][2], "Worlds") def test_read_unbounded_right_column_header(): """The right hand column should be allowed to overflow""" table = """ # comment (with blank line above) ===== ===== ==== Col1 Col2 Col3Long ===== ===== ==== 1.2 2 Hello 2.4 4 Worlds ===== ===== ==== """ reader = ascii.get_reader(Reader=ascii.RST) dat = reader.read(table) assert_equal(dat.colnames[-1], "Col3Long") def test_read_right_indented_table(): """We should be able to read right indented tables correctly""" table = """ # comment (with blank line above) ==== ==== ==== Col1 Col2 Col3 ==== ==== ==== 3 3.4 foo 1 4.5 bar ==== ==== ==== """ reader = ascii.get_reader(Reader=ascii.RST) dat = reader.read(table) assert_equal(dat.colnames, ["Col1", "Col2", "Col3"]) assert_equal(dat[0][2], "foo") assert_equal(dat[1][0], 1) def test_trailing_spaces_in_row_definition(): """Trailing spaces in the row definition column shouldn't matter""" table = ( "\n" "# comment (with blank line above)\n" " ==== ==== ==== \n" " Col1 Col2 Col3\n" " ==== ==== ==== \n" " 3 3.4 foo\n" " 1 4.5 bar\n" " ==== ==== ==== \n" ) # make sure no one accidentally deletes the trailing whitespaces in the # 
table. assert len(table) == 151 reader = ascii.get_reader(Reader=ascii.RST) dat = reader.read(table) assert_equal(dat.colnames, ["Col1", "Col2", "Col3"]) assert_equal(dat[0][2], "foo") assert_equal(dat[1][0], 1) table = """\ ====== =========== ============ =========== Col1 Col2 Col3 Col4 ====== =========== ============ =========== 1.2 "hello" 1 a 2.4 's worlds 2 2 ====== =========== ============ =========== """ dat = ascii.read(table, Reader=ascii.RST) def test_write_normal(): """Write a table as a normal SimpleRST Table""" out = StringIO() ascii.write(dat, out, Writer=ascii.RST) assert_equal_splitlines( out.getvalue(), """\ ==== ========= ==== ==== Col1 Col2 Col3 Col4 ==== ========= ==== ==== 1.2 "hello" 1 a 2.4 's worlds 2 2 ==== ========= ==== ==== """, ) def test_rst_with_header_rows(): """Round-trip a table with header_rows specified""" lines = [ "======= ======== ====", " wave response ints", " nm ct ", "float64 float32 int8", "======= ======== ====", " 350.0 1.0 1", " 950.0 2.0 2", "======= ======== ====", ] tbl = QTable.read(lines, format="ascii.rst", header_rows=["name", "unit", "dtype"]) assert tbl["wave"].unit == u.nm assert tbl["response"].unit == u.ct assert tbl["wave"].dtype == np.float64 assert tbl["response"].dtype == np.float32 assert tbl["ints"].dtype == np.int8 out = StringIO() tbl.write(out, format="ascii.rst", header_rows=["name", "unit", "dtype"]) assert out.getvalue().splitlines() == lines
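# A minimal writer sketch matching the round-trip test above: build a small
# QTable with units and serialize it as reStructuredText with extra header
# rows (requires an astropy whose RST writer accepts ``header_rows``).
def _example_write_rst_header_rows():
    t = QTable()
    t["wave"] = [350.0, 950.0] * u.nm
    t["response"] = np.array([1.0, 2.0], dtype=np.float32) * u.ct
    out = StringIO()
    t.write(out, format="ascii.rst", header_rows=["name", "unit", "dtype"])
    return out.getvalue()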
189467ea7af45f1beae8db9a1f5817610c0ee3000a0e7890defc7590d32f7f3e
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file connects ASDF to the astropy.table.Table class

import warnings

from astropy.io import registry as io_registry
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
from astropy.table import Table
from astropy.utils.compat import optional_deps


def read_table(filename, data_key=None, find_table=None, **kwargs):
    """
    Read a `~astropy.table.Table` object from an ASDF file.

    This requires `asdf <https://pypi.org/project/asdf/>`_ to be installed.
    By default, this function will look for a Table object with the key of
    ``data`` in the top-level ASDF tree. The parameters ``data_key`` and
    ``find_table`` can be used to override the default behavior.

    This function is registered as the Table reader for ASDF files with the
    unified I/O interface.

    Parameters
    ----------
    filename : str or :class:`py.path:local`
        Name of the file to be read
    data_key : str
        Optional top-level key to use for finding the Table in the tree. If not
        provided, uses ``data`` by default. Use of this parameter is not
        compatible with ``find_table``.
    find_table : function
        Optional function to be used for locating the Table in the tree. The
        function takes a single parameter, which is a dictionary representing
        the top of the ASDF tree. The function must return a
        `~astropy.table.Table` instance.

    Returns
    -------
    table : `~astropy.table.Table`
        `~astropy.table.Table` instance
    """
    warnings.warn(create_asdf_deprecation_warning())

    try:
        import asdf
    except ImportError:
        raise Exception("The asdf module is required to read and write ASDF files")

    if data_key and find_table:
        raise ValueError("Options 'data_key' and 'find_table' are not compatible")

    with asdf.open(filename, **kwargs) as af:
        if find_table:
            return find_table(af.tree)
        else:
            return af[data_key or "data"]


def write_table(table, filename, data_key=None, make_tree=None, **kwargs):
    """
    Write a `~astropy.table.Table` object to an ASDF file.

    This requires `asdf <https://pypi.org/project/asdf/>`_ to be installed.
    By default, this function will write a Table object in the top-level ASDF
    tree using the key of ``data``. The parameters ``data_key`` and
    ``make_tree`` can be used to override the default behavior.

    This function is registered as the Table writer for ASDF files with the
    unified I/O interface.

    Parameters
    ----------
    table : `~astropy.table.Table`
        `~astropy.table.Table` instance to be written
    filename : str or :class:`py.path:local`
        Name of the new ASDF file to be created
    data_key : str
        Optional top-level key in the ASDF tree to use when writing the Table.
        If not provided, uses ``data`` by default. Use of this parameter is not
        compatible with ``make_tree``.
    make_tree : function
        Optional function to be used for creating the ASDF tree. The function
        takes a single parameter, which is the `~astropy.table.Table` instance
        to be written. The function must return a `dict` representing the ASDF
        tree to be created.
""" warnings.warn(create_asdf_deprecation_warning()) try: import asdf except ImportError: raise Exception("The asdf module is required to read and write ASDF files") if data_key and make_tree: raise ValueError("Options 'data_key' and 'make_tree' are not compatible") if make_tree: tree = make_tree(table) else: tree = {data_key or "data": table} with asdf.AsdfFile(tree) as af: af.write_to(filename, **kwargs) def asdf_identify(origin, filepath, fileobj, *args, **kwargs): try: import asdf # noqa: F401 except ImportError: return False return filepath is not None and filepath.endswith(".asdf") if not optional_deps.HAS_ASDF_ASTROPY: io_registry.register_reader("asdf", Table, read_table) io_registry.register_writer("asdf", Table, write_table) io_registry.register_identifier("asdf", Table, asdf_identify)
b0a14c3bc2757fdd7ddc123164c8b3f9b975a4519f90cd7ed0bb9d3b57331f45
# Licensed under a 3-clause BSD style license - see LICENSE.rst import os from asdf.extension import AsdfExtension, BuiltinExtension from asdf.util import filepath_to_url # Make sure that all tag implementations are imported by the time we create # the extension class so that _astropy_asdf_types is populated correctly. We # could do this using __init__ files, except it causes pytest import errors in # the case that asdf is not installed. from .tags.coordinates.angle import * from .tags.coordinates.earthlocation import * from .tags.coordinates.frames import * from .tags.coordinates.representation import * from .tags.coordinates.skycoord import * from .tags.coordinates.spectralcoord import * from .tags.fits.fits import * from .tags.table.table import * from .tags.time.time import * from .tags.time.timedelta import * from .tags.transform.basic import * from .tags.transform.compound import * from .tags.transform.functional_models import * from .tags.transform.math import * from .tags.transform.physical_models import * from .tags.transform.polynomial import * from .tags.transform.powerlaws import * from .tags.transform.projections import * from .tags.transform.spline import * from .tags.transform.tabular import * from .tags.unit.equivalency import * from .tags.unit.quantity import * from .tags.unit.unit import * from .types import _astropy_asdf_types, _astropy_types __all__ = ["AstropyExtension", "AstropyAsdfExtension"] ASTROPY_SCHEMA_URI_BASE = "http://astropy.org/schemas/" SCHEMA_PATH = os.path.abspath( os.path.join(os.path.dirname(__file__), "data", "schemas") ) ASTROPY_URL_MAPPING = [ ( ASTROPY_SCHEMA_URI_BASE, filepath_to_url(os.path.join(SCHEMA_PATH, "astropy.org")) + "/{url_suffix}.yaml", ) ] # This extension is used to register custom types that have both tags and # schemas defined by Astropy. class AstropyExtension(AsdfExtension): @property def types(self): return _astropy_types @property def tag_mapping(self): return [ ("tag:astropy.org:astropy", ASTROPY_SCHEMA_URI_BASE + "astropy{tag_suffix}") ] @property def url_mapping(self): return ASTROPY_URL_MAPPING # This extension is used to register custom tag types that have schemas defined # by ASDF, but have tag implementations defined in astropy. class AstropyAsdfExtension(BuiltinExtension): @property def types(self): return _astropy_asdf_types
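# A simplified sketch (not part of the extension API) of how asdf consumes the
# mappings above: a tag suffix is substituted into the ``tag_mapping`` URI
# template, and the resulting schema URI is resolved to a local file URL via
# ``url_mapping``. The "/coordinates/angle-1.0.0" suffix is illustrative only.
def _example_resolve_schema(tag_suffix="/coordinates/angle-1.0.0"):
    schema_uri = ASTROPY_SCHEMA_URI_BASE + "astropy" + tag_suffix
    base, template = ASTROPY_URL_MAPPING[0]
    return template.format(url_suffix=schema_uri[len(base):])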
e33b1fde8af781cbcad08a8e3e9f9654ca8380847a00307d5a7e86643dc932b5
# Licensed under a 3-clause BSD style license - see LICENSE.rst import numpy as np import pytest from astropy import units as u from astropy.coordinates import ( Angle, CartesianRepresentation, EarthLocation, Latitude, Longitude, SkyCoord, SphericalCosLatDifferential, SphericalRepresentation, ) from astropy.io.misc.parquet import get_pyarrow, parquet_identify from astropy.table import Column, NdarrayMixin, QTable, Table from astropy.table.table_helpers import simple_table from astropy.time import Time, TimeDelta from astropy.units import allclose as quantity_allclose from astropy.units.quantity import QuantityInfo from astropy.utils.compat.optional_deps import HAS_PANDAS from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH # Skip all tests in this file if we cannot import pyarrow pyarrow = pytest.importorskip("pyarrow") ALL_DTYPES = [ np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, np.bool_, "|S3", "U3", ] def _default_values(dtype): if dtype == np.bool_: return [0, 1, 1] elif dtype == "|S3": return [b"abc", b"def", b"ghi"] elif dtype == "U3": return ["abc", "def", "ghi"] else: return [1, 2, 3] def _default_array_values(dtype): values = _default_values(dtype) return [values for i in range(3)] def _default_var_length_array_values(dtype): values = _default_values(dtype) return [ [ values[0], ], [ values[0], values[1], ], [ values[0], values[1], values[2], ], ] def test_read_write_simple(tmp_path): """Test writing/reading a simple parquet file.""" test_file = tmp_path / "test.parquet" t1 = Table() t1.add_column(Column(name="a", data=[1, 2, 3])) t1.write(test_file) t2 = Table.read(test_file) assert np.all(t2["a"] == [1, 2, 3]) def test_read_write_existing(tmp_path): """Test writing an existing file without overwriting.""" test_file = tmp_path / "test.parquet" with open(test_file, "w") as f: # create empty file pass t1 = Table() t1.add_column(Column(name="a", data=[1, 2, 3])) with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): t1.write(test_file) def test_read_write_existing_overwrite(tmp_path): """Test overwriting an existing file.""" test_file = tmp_path / "test.parquet" with open(test_file, "w") as f: # create empty file pass t1 = Table() t1.add_column(Column(name="a", data=[1, 2, 3])) t1.write(test_file, overwrite=True) t2 = Table.read(test_file) assert np.all(t2["a"] == [1, 2, 3]) def test_read_fileobj(tmp_path): """Test reading a file object.""" test_file = tmp_path / "test.parquet" t1 = Table() t1.add_column(Column(name="a", data=[1, 2, 3])) t1.write(test_file) import io with io.FileIO(test_file, mode="r") as input_file: t2 = Table.read(input_file) assert np.all(t2["a"] == [1, 2, 3]) def test_read_pathlikeobj(tmp_path): """Test reading a path-like object.""" test_file = tmp_path / "test.parquet" t1 = Table() t1.add_column(Column(name="a", data=[1, 2, 3])) t1.write(test_file) import pathlib p = pathlib.Path(test_file) t2 = Table.read(p) assert np.all(t2["a"] == [1, 2, 3]) def test_read_wrong_fileobj(): """Test reading an incorrect fileobject type.""" class FakeFile: def not_read(self): pass f = FakeFile() with pytest.raises( TypeError, match="pyarrow can only open path-like or file-like objects." 
    ):
        Table.read(f, format="parquet")


def test_identify_wrong_fileobj():
    """Test identifying an incorrect fileobj."""

    class FakeFile:
        def not_read(self):
            pass

    f = FakeFile()

    assert not parquet_identify("test", "test", f)


def test_identify_file_wrong_extension():
    """Test identifying an incorrect extension."""
    assert not parquet_identify("test", "test.notparquet", None)


def test_identify_file_correct_extension():
    """Test identifying a correct extension."""
    assert parquet_identify("test", "test.parquet", None)
    assert parquet_identify("test", "test.parq", None)


def test_identify_file_noobject_nopath():
    """Test running identify with no object or path."""
    assert not parquet_identify("test", None, None)


def test_write_wrong_type():
    """Test writing to a filename of the wrong type."""
    t1 = Table()
    t1.add_column(Column(name="a", data=[1, 2, 3]))

    with pytest.raises(TypeError, match="should be a string"):
        t1.write(1212, format="parquet")


@pytest.mark.parametrize("dtype", ALL_DTYPES)
def test_preserve_single_dtypes(tmp_path, dtype):
    """Test that round-tripping a single column preserves datatypes."""
    test_file = tmp_path / "test.parquet"

    values = _default_values(dtype)

    t1 = Table()
    t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
    t1.write(test_file)

    t2 = Table.read(test_file)

    assert np.all(t2["a"] == values)
    assert t2["a"].dtype == dtype


@pytest.mark.parametrize("dtype", ALL_DTYPES)
def test_preserve_single_array_dtypes(tmp_path, dtype):
    """Test that round-tripping a single array column preserves datatypes."""
    test_file = tmp_path / "test.parquet"

    values = _default_array_values(dtype)

    t1 = Table()
    t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
    t1.write(test_file)

    t2 = Table.read(test_file)

    assert np.all(t2["a"] == t1["a"])
    assert np.all(t2["a"].shape == np.array(values).shape)
    assert t2["a"].dtype == dtype


@pytest.mark.parametrize("dtype", ALL_DTYPES)
def test_preserve_single_var_length_array_dtypes(tmp_path, dtype):
    """
    Test that round-tripping a single variable length array column preserves
    datatypes.
""" test_file = tmp_path / "test.parquet" values = _default_var_length_array_values(dtype) t1 = Table() data = np.array([np.array(val, dtype=dtype) for val in values], dtype=np.object_) t1.add_column(Column(name="a", data=data)) t1.write(test_file) t2 = Table.read(test_file) for row1, row2 in zip(t1["a"], t2["a"]): assert np.all(row1 == row2) assert row1.dtype == row2.dtype def test_preserve_all_dtypes(tmp_path): """Test that round-tripping preserves a table with all the datatypes.""" test_file = tmp_path / "test.parquet" t1 = Table() for dtype in ALL_DTYPES: values = _default_values(dtype) t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype))) arr_values = _default_array_values(dtype) t1.add_column( Column(name=str(dtype) + "_arr", data=np.array(arr_values, dtype=dtype)) ) t1.write(test_file) t2 = Table.read(test_file) for dtype in ALL_DTYPES: values = _default_values(dtype) assert np.all(t2[str(dtype)] == values) assert t2[str(dtype)].dtype == dtype arr_values = _default_array_values(dtype) assert np.all(t2[str(dtype) + "_arr"] == values) assert t2[str(dtype)].dtype == dtype assert np.all(t2[str(dtype) + "_arr"].shape == np.array(arr_values).shape) # Test just reading the schema schema2 = Table.read(test_file, schema_only=True) assert len(schema2) == 0 assert schema2.dtype == t2.dtype def test_preserve_all_var_length_dtypes(tmp_path): """Test that round-tripping preserves a table with all the var length datatypes.""" test_file = tmp_path / "test.parquet" t1 = Table() for dtype in ALL_DTYPES: varr_values = _default_var_length_array_values(dtype) data = np.array( [np.array(val, dtype=dtype) for val in varr_values], dtype=np.object_ ) t1.add_column(Column(name=str(dtype) + "_varr", data=data)) t1.write(test_file) t2 = Table.read(test_file) for dtype in ALL_DTYPES: varr_values = _default_var_length_array_values(dtype) colname = str(dtype) + "_varr" for row1, row2 in zip(t1[colname], t2[colname]): assert np.all(row1 == row2) assert row1.dtype == row2.dtype def test_write_empty_tables(tmp_path): """Test that we can save an empty table with var length datatypes.""" test_file = tmp_path / "test.parquet" t1 = Table() for dtype in ALL_DTYPES: values = _default_values(dtype) t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype))) arr_values = _default_array_values(dtype) t1.add_column( Column(name=str(dtype) + "_arr", data=np.array(arr_values, dtype=dtype)) ) # Write an empty table with values and arrays, and confirm it works. data = np.zeros(0, dtype=t1.dtype) t2 = Table(data=data) t2.write(test_file) t3 = Table.read(test_file) assert t3.dtype == t2.dtype test_file2 = tmp_path / "test2.parquet" t4 = Table() for dtype in ALL_DTYPES: varr_values = _default_var_length_array_values(dtype) data = np.array( [np.array(val, dtype=dtype) for val in varr_values], dtype=np.object_ ) t4.add_column(Column(name=str(dtype) + "_varr", data=data)) # Write an empty table with variable-length arrays, and confirm this # raises an exception. (The datatype of an np.object_ type column # cannot be inferred from an empty table.) 
data = np.zeros(0, dtype=t4.dtype) t5 = Table(data=data) with pytest.raises(ValueError, match="Cannot serialize zero-length table") as err: t5.write(test_file2) def test_heterogeneous_var_array_table(tmp_path): """Test exception when trying to serialize a mixed-type variable-length column.""" test_file = tmp_path / "test.parquet" t1 = Table() data = np.array( [ np.array([0, 1, 2], dtype=np.int32), np.array([0, 1, 2, 3, 4], dtype=np.float64), ], dtype=np.object_, ) t1.add_column(Column(name="a", data=data)) with pytest.raises(ValueError, match="Cannot serialize mixed-type column") as err: t1.write(test_file) def test_preserve_meta(tmp_path): """Test that writing/reading preserves metadata.""" test_file = tmp_path / "test.parquet" t1 = Table() t1.add_column(Column(name="a", data=[1, 2, 3])) t1.meta["a"] = 1 t1.meta["b"] = "hello" t1.meta["c"] = 3.14159 t1.meta["d"] = True t1.meta["e"] = np.array([1, 2, 3]) t1.write(test_file) t2 = Table.read(test_file) for key in t1.meta: assert np.all(t1.meta[key] == t2.meta[key]) def test_preserve_serialized(tmp_path): """Test that writing/reading preserves unit/format/description.""" test_file = tmp_path / "test.parquet" t1 = Table() t1["a"] = Column(data=[1, 2, 3], unit="s") t1["a"].meta["a0"] = "A0" t1["a"].meta["a1"] = {"a1": [0, 1]} t1["a"].format = "7.3f" t1["a"].description = "A column" t1.meta["b"] = 1 t1.meta["c"] = {"c0": [0, 1]} t1.write(test_file, overwrite=True) t2 = Table.read(test_file) assert t1["a"].unit == t2["a"].unit assert t1["a"].format == t2["a"].format assert t1["a"].description == t2["a"].description assert t1["a"].meta == t2["a"].meta assert t1.meta == t2.meta def test_metadata_very_large(tmp_path): """Test that very large datasets work""" test_file = tmp_path / "test.parquet" t1 = Table() t1["a"] = Column(data=[1, 2, 3], unit="s") t1["a"].meta["a0"] = "A0" t1["a"].meta["a1"] = {"a1": [0, 1]} t1["a"].format = "7.3f" t1["a"].description = "A column" t1.meta["b"] = 1 t1.meta["c"] = {"c0": [0, 1]} t1.meta["meta_big"] = "0" * (2**16 + 1) t1.meta["meta_biggerstill"] = "0" * (2**18) t1.write(test_file, overwrite=True) t2 = Table.read(test_file) assert t1["a"].unit == t2["a"].unit assert t1["a"].format == t2["a"].format assert t1["a"].description == t2["a"].description assert t1["a"].meta == t2["a"].meta assert t1.meta == t2.meta def test_fail_meta_serialize(tmp_path): """Test that we cannot preserve objects in metadata.""" test_file = tmp_path / "test.parquet" t1 = Table() t1.add_column(Column(name="a", data=[1, 2, 3])) t1.meta["f"] = str with pytest.raises(Exception) as err: t1.write(test_file) assert "cannot represent an object" in str(err.value) assert "<class 'str'>" in str(err.value) def assert_objects_equal(obj1, obj2, attrs, compare_class=True): """Convenient routine to check objects and attributes match.""" if compare_class: assert obj1.__class__ is obj2.__class__ info_attrs = [ "info.name", "info.format", "info.unit", "info.description", "info.meta", ] for attr in attrs + info_attrs: a1 = obj1 a2 = obj2 for subattr in attr.split("."): try: a1 = getattr(a1, subattr) a2 = getattr(a2, subattr) except AttributeError: a1 = a1[subattr] a2 = a2[subattr] # Mixin info.meta can None instead of empty OrderedDict(), #6720 would # fix this. if attr == "info.meta": if a1 is None: a1 = {} if a2 is None: a2 = {} if isinstance(a1, np.ndarray) and a1.dtype.kind == "f": assert quantity_allclose(a1, a2, rtol=1e-15) else: assert np.all(a1 == a2) # Testing Parquet table read/write with mixins. 
This is mostly # copied from HDF5/FITS mixin testing, and it might be good to unify it. # Analogous tests also exist for ECSV. el = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km) el2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km) sr = SphericalRepresentation([0, 1] * u.deg, [2, 3] * u.deg, 1 * u.kpc) cr = CartesianRepresentation([0, 1] * u.pc, [4, 5] * u.pc, [8, 6] * u.pc) sd = SphericalCosLatDifferential( [0, 1] * u.mas / u.yr, [0, 1] * u.mas / u.yr, 10 * u.km / u.s ) srd = SphericalRepresentation(sr, differentials=sd) sc = SkyCoord([1, 2], [3, 4], unit="deg,deg", frame="fk4", obstime="J1990.5") scd = SkyCoord( [1, 2], [3, 4], [5, 6], unit="deg,deg,m", frame="fk4", obstime=["J1990.5", "J1991.5"], ) scdc = scd.copy() scdc.representation_type = "cartesian" scpm = SkyCoord( [1, 2], [3, 4], [5, 6], unit="deg,deg,pc", pm_ra_cosdec=[7, 8] * u.mas / u.yr, pm_dec=[9, 10] * u.mas / u.yr, ) scpmrv = SkyCoord( [1, 2], [3, 4], [5, 6], unit="deg,deg,pc", pm_ra_cosdec=[7, 8] * u.mas / u.yr, pm_dec=[9, 10] * u.mas / u.yr, radial_velocity=[11, 12] * u.km / u.s, ) scrv = SkyCoord( [1, 2], [3, 4], [5, 6], unit="deg,deg,pc", radial_velocity=[11, 12] * u.km / u.s ) tm = Time([2450814.5, 2450815.5], format="jd", scale="tai", location=el) # NOTE: in the test below the name of the column "x" for the Quantity is # important since it tests the fix for #10215 (namespace clash, where "x" # clashes with "el2.x"). mixin_cols = { "tm": tm, "dt": TimeDelta([1, 2] * u.day), "sc": sc, "scd": scd, "scdc": scdc, "scpm": scpm, "scpmrv": scpmrv, "scrv": scrv, "x": [1, 2] * u.m, "qdb": [10, 20] * u.dB(u.mW), "qdex": [4.5, 5.5] * u.dex(u.cm / u.s**2), "qmag": [21, 22] * u.ABmag, "lat": Latitude([1, 2] * u.deg), "lon": Longitude([1, 2] * u.deg, wrap_angle=180.0 * u.deg), "ang": Angle([1, 2] * u.deg), "el2": el2, "sr": sr, "cr": cr, "sd": sd, "srd": srd, } time_attrs = ["value", "shape", "format", "scale", "location"] compare_attrs = { "c1": ["data"], "c2": ["data"], "tm": time_attrs, "dt": ["shape", "value", "format", "scale"], "sc": ["ra", "dec", "representation_type", "frame.name"], "scd": ["ra", "dec", "distance", "representation_type", "frame.name"], "scdc": ["x", "y", "z", "representation_type", "frame.name"], "scpm": [ "ra", "dec", "distance", "pm_ra_cosdec", "pm_dec", "representation_type", "frame.name", ], "scpmrv": [ "ra", "dec", "distance", "pm_ra_cosdec", "pm_dec", "radial_velocity", "representation_type", "frame.name", ], "scrv": [ "ra", "dec", "distance", "radial_velocity", "representation_type", "frame.name", ], "x": ["value", "unit"], "qdb": ["value", "unit"], "qdex": ["value", "unit"], "qmag": ["value", "unit"], "lon": ["value", "unit", "wrap_angle"], "lat": ["value", "unit"], "ang": ["value", "unit"], "el2": ["x", "y", "z", "ellipsoid"], "nd": ["x", "y", "z"], "sr": ["lon", "lat", "distance"], "cr": ["x", "y", "z"], "sd": ["d_lon_coslat", "d_lat", "d_distance"], "srd": [ "lon", "lat", "distance", "differentials.s.d_lon_coslat", "differentials.s.d_lat", "differentials.s.d_distance", ], } def test_parquet_mixins_qtable_to_table(tmp_path): """Test writing as QTable and reading as Table. Ensure correct classes come out. """ filename = tmp_path / "test_simple.parquet" names = sorted(mixin_cols) t = QTable([mixin_cols[name] for name in names], names=names) t.write(filename, format="parquet") t2 = Table.read(filename, format="parquet") assert t.colnames == t2.colnames for name, col in t.columns.items(): col2 = t2[name] # Special-case Time, which does not yet support round-tripping # the format. 
if isinstance(col2, Time): col2.format = col.format attrs = compare_attrs[name] compare_class = True if isinstance(col.info, QuantityInfo): # Downgrade Quantity to Column + unit assert type(col2) is Column # Class-specific attributes like `value` or `wrap_angle` are lost. attrs = ["unit"] compare_class = False # Compare data values here (assert_objects_equal doesn't know how in this case) assert np.all(col.value == col2) assert_objects_equal(col, col2, attrs, compare_class) @pytest.mark.parametrize("table_cls", (Table, QTable)) def test_parquet_mixins_as_one(table_cls, tmp_path): """Test write/read all cols at once and validate intermediate column names""" filename = tmp_path / "test_simple.parquet" names = sorted(mixin_cols) t = table_cls([mixin_cols[name] for name in names], names=names) t.meta["C"] = "spam" t.meta["comments"] = ["this", "is", "a", "comment"] t.meta["history"] = ["first", "second", "third"] t.write(filename, format="parquet") t2 = table_cls.read(filename, format="parquet") assert t2.meta["C"] == "spam" assert t2.meta["comments"] == ["this", "is", "a", "comment"] assert t2.meta["history"] == ["first", "second", "third"] assert t.colnames == t2.colnames @pytest.mark.parametrize("name_col", list(mixin_cols.items())) @pytest.mark.parametrize("table_cls", (Table, QTable)) def test_parquet_mixins_per_column(table_cls, name_col, tmp_path): """Test write/read one col at a time and do detailed validation""" filename = tmp_path / "test_simple.parquet" name, col = name_col c = [1.0, 2.0] t = table_cls([c, col, c], names=["c1", name, "c2"]) t[name].info.description = "my description" t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}} if not t.has_mixin_columns: pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)") if isinstance(t[name], NdarrayMixin): pytest.xfail("NdarrayMixin not supported") t.write(filename, format="parquet") t2 = table_cls.read(filename, format="parquet") assert t.colnames == t2.colnames for colname in t.colnames: assert_objects_equal(t[colname], t2[colname], compare_attrs[colname]) # Special case to make sure Column type doesn't leak into Time class data if name.startswith("tm"): assert t2[name]._time.jd1.__class__ is np.ndarray assert t2[name]._time.jd2.__class__ is np.ndarray def test_round_trip_masked_table_default(tmp_path): """Test round-trip of MaskedColumn through Parquet using default serialization that writes a separate mask column. Note: >>> simple_table(masked=True) <Table masked=True length=3> a b c int64 float64 str1 ----- ------- ---- -- 1.0 c 2 2.0 -- 3 -- e """ filename = tmp_path / "test.parquet" t = simple_table(masked=True) # int, float, and str cols with one masked element t["c"] = [b"c", b"d", b"e"] t["c"].mask[1] = True t.write(filename, format="parquet") t2 = Table.read(filename) assert t2.masked is False assert t2.colnames == t.colnames for name in t2.colnames: assert np.all(t2[name].mask == t[name].mask) assert np.all(t2[name] == t[name]) # Data under the mask round-trips also (unmask data to show this). 
t[name].mask = False t2[name].mask = False assert np.all(t2[name] == t[name]) @pytest.mark.parametrize("table_cls", (Table, QTable)) def test_parquet_mixins_read_one_name(table_cls, tmp_path): """Test write all cols at once, and read one at a time.""" filename = tmp_path / "test_simple.parquet" names = sorted(mixin_cols) t = table_cls([mixin_cols[name] for name in names], names=names) t.meta["C"] = "spam" t.meta["comments"] = ["this", "is", "a", "comment"] t.meta["history"] = ["first", "second", "third"] t.write(filename, format="parquet") for name in names: t2 = table_cls.read(filename, format="parquet", include_names=[name]) assert t2.meta["C"] == "spam" assert t2.meta["comments"] == ["this", "is", "a", "comment"] assert t2.meta["history"] == ["first", "second", "third"] assert t2.colnames == [name] @pytest.mark.parametrize("table_cls", (Table, QTable)) def test_parquet_mixins_read_exclude_names(table_cls, tmp_path): """Test write all cols at once, and read all but one at a time.""" filename = tmp_path / "test_simple.parquet" names = sorted(mixin_cols) t = table_cls([mixin_cols[name] for name in names], names=names) t.meta["C"] = "spam" t.meta["comments"] = ["this", "is", "a", "comment"] t.meta["history"] = ["first", "second", "third"] t.write(filename, format="parquet") t2 = table_cls.read(filename, format="parquet", exclude_names=names[0:5]) assert t.colnames[5:] == t2.colnames @pytest.mark.parametrize("table_cls", (Table, QTable)) def test_parquet_mixins_read_no_columns(table_cls, tmp_path): """Test write all cols at once, and try to read no valid columns.""" filename = tmp_path / "test_simple.parquet" names = sorted(mixin_cols) t = table_cls([mixin_cols[name] for name in names], names=names) t.meta["C"] = "spam" t.meta["comments"] = ["this", "is", "a", "comment"] t.meta["history"] = ["first", "second", "third"] t.write(filename, format="parquet") with pytest.raises(ValueError, match="No include_names specified"): t2 = table_cls.read( filename, format="parquet", include_names=["not_a_column", "also_not_a_column"], ) @pytest.mark.parametrize("table_cls", (Table, QTable)) def test_parquet_mixins_read_schema(table_cls, tmp_path): """Test write all cols at once, and read the schema.""" filename = tmp_path / "test_simple.parquet" names = sorted(mixin_cols) t = table_cls([mixin_cols[name] for name in names], names=names) t.meta["C"] = "spam" t.meta["comments"] = ["this", "is", "a", "comment"] t.meta["history"] = ["first", "second", "third"] t.write(filename, format="parquet") t2 = table_cls.read(filename, format="parquet", schema_only=True) assert t2.meta["C"] == "spam" assert t2.meta["comments"] == ["this", "is", "a", "comment"] assert t2.meta["history"] == ["first", "second", "third"] assert t.colnames == t2.colnames assert len(t2) == 0 def test_parquet_filter(tmp_path): """Test reading a parquet file with a filter.""" filename = tmp_path / "test_simple.parquet" t1 = Table() t1["a"] = Column(data=np.arange(100), dtype=np.int32) t1["b"] = Column(data=np.arange(100, 0, -1), dtype=np.float64) t1.write(filename, overwrite=True) t2 = Table.read(filename, filters=[("a", "<", 50)]) assert t2["a"].max() < 50 t2 = Table.read(filename, filters=[("b", "<", 50)]) assert t2["b"].max() < 50 def test_parquet_read_generic(tmp_path): """Test reading a generic parquet file.""" filename = tmp_path / "test_generic.parq" t1 = Table() for dtype in ALL_DTYPES: values = _default_values(dtype) t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype))) # Write the table generically via 
pyarrow.parquet names = t1.dtype.names type_list = [ (name, pyarrow.from_numpy_dtype(t1[name].dtype.type)) for name in names ] schema = pyarrow.schema(type_list) _, parquet, writer_version = get_pyarrow() # We use version='2.0' for full support of datatypes including uint32. with parquet.ParquetWriter(filename, schema, version=writer_version) as writer: arrays = [pyarrow.array(t1[name].data) for name in names] writer.write_table(pyarrow.Table.from_arrays(arrays, schema=schema)) with pytest.warns(AstropyUserWarning, match="No table::len"): t2 = Table.read(filename) for dtype in ALL_DTYPES: values = _default_values(dtype) assert np.all(t2[str(dtype)] == values) assert t2[str(dtype)].dtype == dtype @pytest.mark.skipif(not HAS_PANDAS, reason="requires pandas") def test_parquet_read_pandas(tmp_path): """Test reading a pandas parquet file.""" filename = tmp_path / "test_pandas.parq" t1 = Table() for dtype in ALL_DTYPES: values = _default_values(dtype) t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype))) df = t1.to_pandas() # We use version='2.0' for full support of datatypes including uint32. _, _, writer_version = get_pyarrow() df.to_parquet(filename, version=writer_version) with pytest.warns(AstropyUserWarning, match="No table::len"): t2 = Table.read(filename) for dtype in ALL_DTYPES: values = _default_values(dtype) assert np.all(t2[str(dtype)] == values) assert t2[str(dtype)].dtype == dtype
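# A minimal usage sketch of the read-side options exercised above: row filters,
# column selection, and schema-only reads. "example.parquet" is a placeholder
# path, and pyarrow is required just as for the rest of this module.
def _example_parquet_read_options(path="example.parquet"):
    t = Table()
    t["a"] = Column(np.arange(10), dtype=np.int32)
    t["b"] = Column(np.arange(10.0))
    t.write(path, format="parquet", overwrite=True)

    subset = Table.read(
        path, format="parquet", include_names=["a"], filters=[("a", "<", 5)]
    )
    schema = Table.read(path, format="parquet", schema_only=True)
    return subset, schema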
a3417c4b0b8999c21067a62dbe95a79bc417850aea3ac7be614e5c5f293b9b56
# Licensed under a 3-clause BSD style license - see LICENSE.rst from astropy.io.misc.asdf.tags.transform.basic import TransformType from astropy.modeling import math_functions from astropy.modeling.math_functions import * from astropy.modeling.math_functions import __all__ as math_classes __all__ = ["NpUfuncType"] class NpUfuncType(TransformType): name = "transform/math_functions" version = "1.0.0" types = ["astropy.modeling.math_functions." + kl for kl in math_classes] @classmethod def from_tree_transform(cls, node, ctx): klass_name = math_functions._make_class_name(node["func_name"]) klass = getattr(math_functions, klass_name) return klass() @classmethod def to_tree_transform(cls, model, ctx): return {"func_name": model.func.__name__}
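# A small round-trip sketch for the tag above. It deliberately avoids naming a
# specific numpy ufunc class and just uses whichever model class is listed
# first in ``math_classes``; ``ctx`` is unused by both class methods, so None
# suffices for a standalone call.
def _example_math_tag_round_trip():
    klass = getattr(math_functions, math_classes[0])
    node = NpUfuncType.to_tree_transform(klass(), None)
    restored = NpUfuncType.from_tree_transform(node, None)
    return isinstance(restored, klass)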
94460a564868273f7f02bd57aaee9ff4f81b3f3242dac49eacb3d0de1a8df1df
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from asdf.tags.core import NDArrayType

from astropy.coordinates.spectral_coordinate import SpectralCoord
from astropy.io.misc.asdf.tags.unit.unit import UnitType
from astropy.io.misc.asdf.types import AstropyType

__all__ = ["SpectralCoordType"]


class SpectralCoordType(AstropyType):
    """
    ASDF tag implementation used to serialize/deserialize SpectralCoord
    objects.
    """

    name = "coordinates/spectralcoord"
    types = [SpectralCoord]
    version = "1.0.0"

    @classmethod
    def to_tree(cls, spec_coord, ctx):
        node = {}
        if isinstance(spec_coord, SpectralCoord):
            node["value"] = spec_coord.value
            node["unit"] = spec_coord.unit
            if spec_coord.observer is not None:
                node["observer"] = spec_coord.observer
            if spec_coord.target is not None:
                node["target"] = spec_coord.target
            return node
        raise TypeError(f"'{spec_coord}' is not a valid SpectralCoord")

    @classmethod
    def from_tree(cls, node, ctx):
        if isinstance(node, SpectralCoord):
            return node

        unit = UnitType.from_tree(node["unit"], ctx)
        value = node["value"]
        observer = node["observer"] if "observer" in node else None
        # Look up the target under its own key; checking for "observer" here
        # would drop the target whenever no observer was serialized.
        target = node["target"] if "target" in node else None
        if isinstance(value, NDArrayType):
            value = value._make_array()

        return SpectralCoord(value, unit=unit, observer=observer, target=target)
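# A standalone sketch of the node produced by the tag above for a bare
# SpectralCoord (no observer or target); ``ctx`` is unused by ``to_tree``, so
# None suffices here. The wavelengths are illustrative only.
def _example_spectralcoord_node():
    import astropy.units as u

    sc = SpectralCoord([3500.0, 6000.0] * u.AA)
    return SpectralCoordType.to_tree(sc, None)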
b9a8180d0ff4be05fa35a694f562629315b835c5c45bcafd940a3e64672f0f14
# Licensed under a 3-clause BSD style license - see LICENSE.rst import pytest # LOCAL from astropy.io.votable import converters, exceptions, tree def test_reraise(): def fail(): raise RuntimeError("This failed") with pytest.raises(RuntimeError, match="From here"): try: fail() except RuntimeError as e: exceptions.vo_reraise(e, additional="From here") def test_parse_vowarning(): config = {"verify": "exception", "filename": "foo.xml"} pos = (42, 64) with pytest.warns(exceptions.W47) as w: field = tree.Field(None, name="c", datatype="char", config=config, pos=pos) converters.get_converter(field, config=config, pos=pos) parts = exceptions.parse_vowarning(str(w[0].message)) match = { "number": 47, "is_exception": False, "nchar": 64, "warning": "W47", "is_something": True, "message": "Missing arraysize indicates length 1", "doc_url": "io/votable/api_exceptions.html#w47", "nline": 42, "is_warning": True, } assert parts == match def test_suppress_warnings(): cfg = {} warn = exceptions.W01("foo") with exceptions.conf.set_temp("max_warnings", 2): with pytest.warns(exceptions.W01) as record: exceptions._suppressed_warning(warn, cfg) assert len(record) == 1 assert "suppressing" not in str(record[0].message) with pytest.warns(exceptions.W01, match="suppressing"): exceptions._suppressed_warning(warn, cfg) exceptions._suppressed_warning(warn, cfg) assert cfg["_warning_counts"][exceptions.W01] == 3 assert exceptions.conf.max_warnings == 10
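# A minimal sketch of how the warning machinery above is driven in practice:
# the ``verify`` option controls whether VOTable spec violations are ignored,
# emitted as warnings, or raised as exceptions. "votable.xml" is a placeholder
# path, not a file shipped with these tests.
def _example_verify_modes(path="votable.xml"):
    from astropy.io.votable import parse

    relaxed = parse(path, verify="warn")  # emit VOWarning subclasses
    strict = parse(path, verify="exception")  # raise on the first violation
    return relaxed, strict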
1e170b088cf54453388ccd700c7312a0d80d1726a0a5e36dcdab50e3876bd405
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ A set of tests for the util.py module. """ import pytest from astropy.io.votable import util def test_range_list(): assert util.coerce_range_list_param((5,)) == ("5.0", 1) def test_range_list2(): assert util.coerce_range_list_param((5e-7, 8e-7)) == ("5e-07,8e-07", 2) def test_range_list3(): assert util.coerce_range_list_param((5e-7, 8e-7, "FOO")) == ("5e-07,8e-07;FOO", 3) def test_range_list4a(): with pytest.raises(ValueError): util.coerce_range_list_param( (5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO") ) def test_range_list4(): assert util.coerce_range_list_param( (5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO"), numeric=False ) == ("5e-07,/8e-07,4/,4/5,J;FOO", 6) def test_range_list5(): with pytest.raises(ValueError): util.coerce_range_list_param(("FOO",)) def test_range_list6(): with pytest.raises(ValueError): print(util.coerce_range_list_param((5, "FOO"), util.stc_reference_frames)) def test_range_list7(): assert util.coerce_range_list_param(("J",), numeric=False) == ("J", 1) def test_range_list8(): for s in [ "5.0", "5e-07,8e-07", "5e-07,8e-07;FOO", "5e-07,/8e-07,4.0/,4.0/5.0;FOO", "J", ]: assert util.coerce_range_list_param(s, numeric=False)[0] == s def test_range_list9a(): with pytest.raises(ValueError): util.coerce_range_list_param("52,-27.8;FOO", util.stc_reference_frames) def test_range_list9(): assert util.coerce_range_list_param("52,-27.8;GALACTIC", util.stc_reference_frames)
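# A minimal usage sketch of the function under test: numbers, open/closed
# ranges, and a trailing qualifier are normalized into the range-list string
# form, and the item count is returned alongside it.
def _example_coerce_range_list():
    param, n = util.coerce_range_list_param(
        (5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO"), numeric=False
    )
    assert param == "5e-07,/8e-07,4/,4/5,J;FOO"
    assert n == 6
    return param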
9bf026e9f2682f2fbaf68f5032cd817462626046ab0b259e14f3fae6d414a8b7
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This is a set of regression tests for vo. """ # STDLIB import difflib import gzip import io import pathlib import sys from unittest import mock import numpy as np # THIRD-PARTY import pytest from numpy.testing import assert_array_equal from astropy.io.votable import tree from astropy.io.votable.exceptions import W39, VOTableSpecError, VOWarning # LOCAL from astropy.io.votable.table import parse, parse_single_table, validate from astropy.io.votable.xmlutil import validate_schema from astropy.utils.data import get_pkg_data_filename, get_pkg_data_filenames # Determine the kind of float formatting in this build of Python if hasattr(sys, "float_repr_style"): legacy_float_repr = sys.float_repr_style == "legacy" else: legacy_float_repr = sys.platform.startswith("win") def assert_validate_schema(filename, version): if sys.platform.startswith("win"): return try: rc, stdout, stderr = validate_schema(filename, version) except OSError: # If xmllint is not installed, we want the test to pass anyway return assert rc == 0, "File did not validate against VOTable schema" def test_parse_single_table(): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 table = parse_single_table(get_pkg_data_filename("data/regression.xml")) assert isinstance(table, tree.Table) assert len(table.array) == 5 def test_parse_single_table2(): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 table2 = parse_single_table( get_pkg_data_filename("data/regression.xml"), table_number=1 ) assert isinstance(table2, tree.Table) assert len(table2.array) == 1 assert len(table2.array.dtype.names) == 28 def test_parse_single_table3(): with pytest.raises(IndexError): parse_single_table(get_pkg_data_filename("data/regression.xml"), table_number=3) def _test_regression(tmp_path, _python_based=False, binary_mode=1): # Read the VOTABLE votable = parse( get_pkg_data_filename("data/regression.xml"), _debug_python_based_parser=_python_based, ) table = votable.get_first_table() dtypes = [ (("string test", "string_test"), "|O8"), (("fixed string test", "string_test_2"), "<U10"), ("unicode_test", "|O8"), (("unicode test", "fixed_unicode_test"), "<U10"), (("string array test", "string_array_test"), "<U4"), ("unsignedByte", "|u1"), ("short", "<i2"), ("int", "<i4"), ("long", "<i8"), ("double", "<f8"), ("float", "<f4"), ("array", "|O8"), ("bit", "|b1"), ("bitarray", "|b1", (3, 2)), ("bitvararray", "|O8"), ("bitvararray2", "|O8"), ("floatComplex", "<c8"), ("doubleComplex", "<c16"), ("doubleComplexArray", "|O8"), ("doubleComplexArrayFixed", "<c16", (2,)), ("boolean", "|b1"), ("booleanArray", "|b1", (4,)), ("nulls", "<i4"), ("nulls_array", "<i4", (2, 2)), ("precision1", "<f8"), ("precision2", "<f8"), ("doublearray", "|O8"), ("bitarray2", "|b1", (16,)), ] if sys.byteorder == "big": new_dtypes = [] for dtype in dtypes: dtype = list(dtype) dtype[1] = dtype[1].replace("<", ">") new_dtypes.append(tuple(dtype)) dtypes = new_dtypes assert table.array.dtype == dtypes votable.to_xml( str(tmp_path / "regression.tabledata.xml"), _debug_python_based_parser=_python_based, ) assert_validate_schema(str(tmp_path / "regression.tabledata.xml"), votable.version) if binary_mode == 1: votable.get_first_table().format = "binary" votable.version = "1.1" elif binary_mode == 2: votable.get_first_table()._config["version_1_3_or_later"] = True votable.get_first_table().format = "binary2" votable.version = "1.3" # Also try passing a file handle with 
open(str(tmp_path / "regression.binary.xml"), "wb") as fd: votable.to_xml(fd, _debug_python_based_parser=_python_based) assert_validate_schema(str(tmp_path / "regression.binary.xml"), votable.version) # Also try passing a file handle with open(str(tmp_path / "regression.binary.xml"), "rb") as fd: votable2 = parse(fd, _debug_python_based_parser=_python_based) votable2.get_first_table().format = "tabledata" votable2.to_xml( str(tmp_path / "regression.bin.tabledata.xml"), _astropy_version="testing", _debug_python_based_parser=_python_based, ) assert_validate_schema( str(tmp_path / "regression.bin.tabledata.xml"), votable.version ) with open( get_pkg_data_filename( f"data/regression.bin.tabledata.truth.{votable.version}.xml" ), encoding="utf-8", ) as fd: truth = fd.readlines() with open(str(tmp_path / "regression.bin.tabledata.xml"), encoding="utf-8") as fd: output = fd.readlines() # If the lines happen to be different, print a diff # This is convenient for debugging sys.stdout.writelines( difflib.unified_diff(truth, output, fromfile="truth", tofile="output") ) assert truth == output # Test implicit gzip saving votable2.to_xml( str(tmp_path / "regression.bin.tabledata.xml.gz"), _astropy_version="testing", _debug_python_based_parser=_python_based, ) with gzip.GzipFile(str(tmp_path / "regression.bin.tabledata.xml.gz"), "rb") as gzfd: output = gzfd.readlines() output = [x.decode("utf-8").rstrip() for x in output] truth = [x.rstrip() for x in truth] assert truth == output @pytest.mark.xfail("legacy_float_repr") def test_regression(tmp_path): # W39: Bit values can not be masked with pytest.warns(W39): _test_regression(tmp_path, False) @pytest.mark.xfail("legacy_float_repr") def test_regression_python_based_parser(tmp_path): # W39: Bit values can not be masked with pytest.warns(W39): _test_regression(tmp_path, True) @pytest.mark.xfail("legacy_float_repr") def test_regression_binary2(tmp_path): # W39: Bit values can not be masked with pytest.warns(W39): _test_regression(tmp_path, False, 2) class TestFixups: def setup_class(self): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 self.table = parse( get_pkg_data_filename("data/regression.xml") ).get_first_table() self.array = self.table.array self.mask = self.table.array.mask def test_implicit_id(self): assert_array_equal(self.array["string_test_2"], self.array["fixed string test"]) class TestReferences: def setup_class(self): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 self.votable = parse(get_pkg_data_filename("data/regression.xml")) self.table = self.votable.get_first_table() self.array = self.table.array self.mask = self.table.array.mask def test_fieldref(self): fieldref = self.table.groups[1].entries[0] assert isinstance(fieldref, tree.FieldRef) assert fieldref.get_ref().name == "boolean" assert fieldref.get_ref().datatype == "boolean" def test_paramref(self): paramref = self.table.groups[0].entries[0] assert isinstance(paramref, tree.ParamRef) assert paramref.get_ref().name == "INPUT" assert paramref.get_ref().datatype == "float" def test_iter_fields_and_params_on_a_group(self): assert len(list(self.table.groups[1].iter_fields_and_params())) == 2 def test_iter_groups_on_a_group(self): assert len(list(self.table.groups[1].iter_groups())) == 1 def test_iter_groups(self): # Because of the ref'd table, there are more logical groups # than actually exist in the file assert len(list(self.votable.iter_groups())) == 9 def test_ref_table(self): tables = 
list(self.votable.iter_tables()) for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]): assert_array_equal(x, y) def test_iter_coosys(self): assert len(list(self.votable.iter_coosys())) == 1 def test_select_columns_by_index(): columns = [0, 5, 13] table = parse( get_pkg_data_filename("data/regression.xml"), columns=columns ).get_first_table() array = table.array mask = table.array.mask assert array["string_test"][0] == "String & test" columns = ["string_test", "unsignedByte", "bitarray"] for c in columns: assert not np.all(mask[c]) assert np.all(mask["unicode_test"]) def test_select_columns_by_name(): columns = ["string_test", "unsignedByte", "bitarray"] table = parse( get_pkg_data_filename("data/regression.xml"), columns=columns ).get_first_table() array = table.array mask = table.array.mask assert array["string_test"][0] == "String & test" for c in columns: assert not np.all(mask[c]) assert np.all(mask["unicode_test"]) class TestParse: def setup_class(self): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 self.votable = parse(get_pkg_data_filename("data/regression.xml")) self.table = self.votable.get_first_table() self.array = self.table.array self.mask = self.table.array.mask def test_string_test(self): assert issubclass(self.array["string_test"].dtype.type, np.object_) assert_array_equal( self.array["string_test"], ["String & test", "String &amp; test", "XXXX", "", ""], ) def test_fixed_string_test(self): assert issubclass(self.array["string_test_2"].dtype.type, np.unicode_) assert_array_equal( self.array["string_test_2"], ["Fixed stri", "0123456789", "XXXX", "", ""] ) def test_unicode_test(self): assert issubclass(self.array["unicode_test"].dtype.type, np.object_) assert_array_equal( self.array["unicode_test"], ["Ceçi n'est pas un pipe", "வணக்கம்", "XXXX", "", ""], ) def test_fixed_unicode_test(self): assert issubclass(self.array["fixed_unicode_test"].dtype.type, np.unicode_) assert_array_equal( self.array["fixed_unicode_test"], ["Ceçi n'est", "வணக்கம்", "0123456789", "", ""], ) def test_unsignedByte(self): assert issubclass(self.array["unsignedByte"].dtype.type, np.uint8) assert_array_equal(self.array["unsignedByte"], [128, 255, 0, 255, 255]) assert not np.any(self.mask["unsignedByte"]) def test_short(self): assert issubclass(self.array["short"].dtype.type, np.int16) assert_array_equal(self.array["short"], [4096, 32767, -4096, 32767, 32767]) assert not np.any(self.mask["short"]) def test_int(self): assert issubclass(self.array["int"].dtype.type, np.int32) assert_array_equal( self.array["int"], [268435456, 2147483647, -268435456, 268435455, 123456789] ) assert_array_equal(self.mask["int"], [False, False, False, False, True]) def test_long(self): assert issubclass(self.array["long"].dtype.type, np.int64) assert_array_equal( self.array["long"], [ 922337203685477, 123456789, -1152921504606846976, 1152921504606846975, 123456789, ], ) assert_array_equal(self.mask["long"], [False, True, False, False, True]) def test_double(self): assert issubclass(self.array["double"].dtype.type, np.float64) assert_array_equal( self.array["double"], [8.9990234375, 0.0, np.inf, np.nan, -np.inf] ) assert_array_equal(self.mask["double"], [False, False, False, True, False]) def test_float(self): assert issubclass(self.array["float"].dtype.type, np.float32) assert_array_equal(self.array["float"], [1.0, 0.0, np.inf, np.inf, np.nan]) assert_array_equal(self.mask["float"], [False, False, False, False, True]) def test_array(self): assert 
issubclass(self.array["array"].dtype.type, np.object_) match = [ [], [[42, 32], [12, 32]], [[12, 34], [56, 78], [87, 65], [43, 21]], [[-1, 23]], [[31, -1]], ] for a, b in zip(self.array["array"], match): # assert issubclass(a.dtype.type, np.int64) # assert a.shape[1] == 2 for a0, b0 in zip(a, b): assert issubclass(a0.dtype.type, np.int64) assert_array_equal(a0, b0) assert self.array.data["array"][3].mask[0][0] assert self.array.data["array"][4].mask[0][1] def test_bit(self): assert issubclass(self.array["bit"].dtype.type, np.bool_) assert_array_equal(self.array["bit"], [True, False, True, False, False]) def test_bit_mask(self): assert_array_equal(self.mask["bit"], [False, False, False, False, True]) def test_bitarray(self): assert issubclass(self.array["bitarray"].dtype.type, np.bool_) assert self.array["bitarray"].shape == (5, 3, 2) assert_array_equal( self.array["bitarray"], [ [[True, False], [True, True], [False, True]], [[False, True], [False, False], [True, True]], [[True, True], [True, False], [False, False]], [[False, False], [False, False], [False, False]], [[False, False], [False, False], [False, False]], ], ) def test_bitarray_mask(self): assert_array_equal( self.mask["bitarray"], [ [[False, False], [False, False], [False, False]], [[False, False], [False, False], [False, False]], [[False, False], [False, False], [False, False]], [[True, True], [True, True], [True, True]], [[True, True], [True, True], [True, True]], ], ) def test_bitvararray(self): assert issubclass(self.array["bitvararray"].dtype.type, np.object_) match = [ [True, True, True], [False, False, False, False, False], [True, False, True, False, True], [], [], ] for a, b in zip(self.array["bitvararray"], match): assert_array_equal(a, b) match_mask = [ [False, False, False], [False, False, False, False, False], [False, False, False, False, False], False, False, ] for a, b in zip(self.array["bitvararray"], match_mask): assert_array_equal(a.mask, b) def test_bitvararray2(self): assert issubclass(self.array["bitvararray2"].dtype.type, np.object_) match = [ [], [ [[False, True], [False, False], [True, False]], [[True, False], [True, False], [True, False]], ], [[[True, True], [True, True], [True, True]]], [], [], ] for a, b in zip(self.array["bitvararray2"], match): for a0, b0 in zip(a, b): assert a0.shape == (3, 2) assert issubclass(a0.dtype.type, np.bool_) assert_array_equal(a0, b0) def test_floatComplex(self): assert issubclass(self.array["floatComplex"].dtype.type, np.complex64) assert_array_equal( self.array["floatComplex"], [np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + 0j, np.nan + 0j], ) assert_array_equal(self.mask["floatComplex"], [True, False, False, True, True]) def test_doubleComplex(self): assert issubclass(self.array["doubleComplex"].dtype.type, np.complex128) assert_array_equal( self.array["doubleComplex"], [np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + (np.inf * 1j), np.nan + 0j], ) assert_array_equal(self.mask["doubleComplex"], [True, False, False, True, True]) def test_doubleComplexArray(self): assert issubclass(self.array["doubleComplexArray"].dtype.type, np.object_) assert [len(x) for x in self.array["doubleComplexArray"]] == [0, 2, 2, 0, 0] def test_boolean(self): assert issubclass(self.array["boolean"].dtype.type, np.bool_) assert_array_equal(self.array["boolean"], [True, False, True, False, False]) def test_boolean_mask(self): assert_array_equal(self.mask["boolean"], [False, False, False, False, True]) def test_boolean_array(self): assert issubclass(self.array["booleanArray"].dtype.type, np.bool_) 
assert_array_equal( self.array["booleanArray"], [ [True, True, True, True], [True, True, False, True], [True, True, False, True], [False, False, False, False], [False, False, False, False], ], ) def test_boolean_array_mask(self): assert_array_equal( self.mask["booleanArray"], [ [False, False, False, False], [False, False, False, False], [False, False, True, False], [True, True, True, True], [True, True, True, True], ], ) def test_nulls(self): assert_array_equal(self.array["nulls"], [0, -9, 2, -9, -9]) assert_array_equal(self.mask["nulls"], [False, True, False, True, True]) def test_nulls_array(self): assert_array_equal( self.array["nulls_array"], [ [[-9, -9], [-9, -9]], [[0, 1], [2, 3]], [[-9, 0], [-9, 1]], [[0, -9], [1, -9]], [[-9, -9], [-9, -9]], ], ) assert_array_equal( self.mask["nulls_array"], [ [[True, True], [True, True]], [[False, False], [False, False]], [[True, False], [True, False]], [[False, True], [False, True]], [[True, True], [True, True]], ], ) def test_double_array(self): assert issubclass(self.array["doublearray"].dtype.type, np.object_) assert len(self.array["doublearray"][0]) == 0 assert_array_equal( self.array["doublearray"][1], [0, 1, np.inf, -np.inf, np.nan, 0, -1] ) assert_array_equal( self.array.data["doublearray"][1].mask, [False, False, False, False, False, False, True], ) def test_bit_array2(self): assert_array_equal( self.array["bitarray2"][0], [ True, True, True, True, False, False, False, False, True, True, True, True, False, False, False, False, ], ) def test_bit_array2_mask(self): assert not np.any(self.mask["bitarray2"][0]) assert np.all(self.mask["bitarray2"][1:]) def test_get_coosys_by_id(self): coosys = self.votable.get_coosys_by_id("J2000") assert coosys.system == "eq_FK5" def test_get_field_by_utype(self): fields = list(self.votable.get_fields_by_utype("myint")) assert fields[0].name == "int" assert fields[0].values.min == -1000 def test_get_info_by_id(self): info = self.votable.get_info_by_id("QUERY_STATUS") assert info.value == "OK" if self.votable.version != "1.1": info = self.votable.get_info_by_id("ErrorInfo") assert info.value == "One might expect to find some INFO here, too..." 
def test_repr(self): assert "3 tables" in repr(self.votable) assert ( repr(list(self.votable.iter_fields_and_params())[0]) == '<PARAM ID="awesome" arraysize="*" datatype="float" ' 'name="INPUT" unit="deg" value="[0.0 0.0]"/>' ) # Smoke test repr(list(self.votable.iter_groups())) # Resource assert repr(self.votable.resources) == "[</>]" class TestThroughTableData(TestParse): def setup_class(self): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 votable = parse(get_pkg_data_filename("data/regression.xml")) self.xmlout = bio = io.BytesIO() # W39: Bit values can not be masked with pytest.warns(W39): votable.to_xml(bio) bio.seek(0) self.votable = parse(bio) self.table = self.votable.get_first_table() self.array = self.table.array self.mask = self.table.array.mask def test_bit_mask(self): assert_array_equal(self.mask["bit"], [False, False, False, False, False]) def test_bitarray_mask(self): assert not np.any(self.mask["bitarray"]) def test_bit_array2_mask(self): assert not np.any(self.mask["bitarray2"]) def test_schema(self, tmp_path): # have to use an actual file because assert_validate_schema only works # on filenames, not file-like objects fn = tmp_path / "test_through_tabledata.xml" with open(fn, "wb") as f: f.write(self.xmlout.getvalue()) assert_validate_schema(fn, "1.1") class TestThroughBinary(TestParse): def setup_class(self): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 votable = parse(get_pkg_data_filename("data/regression.xml")) votable.get_first_table().format = "binary" self.xmlout = bio = io.BytesIO() # W39: Bit values can not be masked with pytest.warns(W39): votable.to_xml(bio) bio.seek(0) self.votable = parse(bio) self.table = self.votable.get_first_table() self.array = self.table.array self.mask = self.table.array.mask # Masked values in bit fields don't roundtrip through the binary # representation -- that's not a bug, just a limitation, so # override the mask array checks here. def test_bit_mask(self): assert not np.any(self.mask["bit"]) def test_bitarray_mask(self): assert not np.any(self.mask["bitarray"]) def test_bit_array2_mask(self): assert not np.any(self.mask["bitarray2"]) class TestThroughBinary2(TestParse): def setup_class(self): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 votable = parse(get_pkg_data_filename("data/regression.xml")) votable.version = "1.3" votable.get_first_table()._config["version_1_3_or_later"] = True votable.get_first_table().format = "binary2" self.xmlout = bio = io.BytesIO() # W39: Bit values can not be masked with pytest.warns(W39): votable.to_xml(bio) bio.seek(0) self.votable = parse(bio) self.table = self.votable.get_first_table() self.array = self.table.array self.mask = self.table.array.mask def test_get_coosys_by_id(self): # No COOSYS in VOTable 1.2 or later pass def table_from_scratch(): from astropy.io.votable.tree import Field, Resource, Table, VOTableFile # Create a new VOTable file... votable = VOTableFile() # ...with one resource... resource = Resource() votable.resources.append(resource) # ... 
with one table table = Table(votable) resource.tables.append(table) # Define some fields table.fields.extend( [ Field(votable, ID="filename", datatype="char"), Field(votable, ID="matrix", datatype="double", arraysize="2x2"), ] ) # Now, use those field definitions to create the numpy record arrays, with # the given number of rows table.create_arrays(2) # Now table.array can be filled with data table.array[0] = ("test1.xml", [[1, 0], [0, 1]]) table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]]) # Now write the whole thing to a file. # Note, we have to use the top-level votable file object out = io.StringIO() votable.to_xml(out) # https://github.com/astropy/astropy/issues/13341 @np.errstate(over="ignore") def test_open_files(): for filename in get_pkg_data_filenames("data", pattern="*.xml"): if filename.endswith("custom_datatype.xml") or filename.endswith( "timesys_errors.xml" ): continue parse(filename) def test_too_many_columns(): with pytest.raises(VOTableSpecError): parse(get_pkg_data_filename("data/too_many_columns.xml.gz")) def test_build_from_scratch(tmp_path): # Create a new VOTable file... votable = tree.VOTableFile() # ...with one resource... resource = tree.Resource() votable.resources.append(resource) # ... with one table table = tree.Table(votable) resource.tables.append(table) # Define some fields table.fields.extend( [ tree.Field( votable, ID="filename", name="filename", datatype="char", arraysize="1" ), tree.Field( votable, ID="matrix", name="matrix", datatype="double", arraysize="2x2" ), ] ) # Now, use those field definitions to create the numpy record arrays, with # the given number of rows table.create_arrays(2) # Now table.array can be filled with data table.array[0] = ("test1.xml", [[1, 0], [0, 1]]) table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]]) # Now write the whole thing to a file. # Note, we have to use the top-level votable file object votable.to_xml(str(tmp_path / "new_votable.xml")) votable = parse(str(tmp_path / "new_votable.xml")) table = votable.get_first_table() assert_array_equal( table.array.mask, np.array( [ (False, [[False, False], [False, False]]), (False, [[False, False], [False, False]]), ], dtype=[("filename", "?"), ("matrix", "?", (2, 2))], ), ) def test_validate(test_path_object=False): """ test_path_object is needed for test below ``test_validate_path_object`` so that file could be passed as pathlib.Path object. """ output = io.StringIO() fpath = get_pkg_data_filename("data/regression.xml") if test_path_object: fpath = pathlib.Path(fpath) # We can't test xmllint, because we can't rely on it being on the # user's machine. 
result = validate(fpath, output, xmllint=False) assert result is False output.seek(0) output = output.readlines() # Uncomment to generate new groundtruth # with open('validation.txt', 'wt', encoding='utf-8') as fd: # fd.write(u''.join(output)) with open(get_pkg_data_filename("data/validation.txt"), encoding="utf-8") as fd: truth = fd.readlines() truth = truth[1:] output = output[1:-1] sys.stdout.writelines( difflib.unified_diff(truth, output, fromfile="truth", tofile="output") ) assert truth == output @mock.patch("subprocess.Popen") def test_validate_xmllint_true(mock_subproc_popen): process_mock = mock.Mock() attrs = {"communicate.return_value": ("ok", "ko"), "returncode": 0} process_mock.configure_mock(**attrs) mock_subproc_popen.return_value = process_mock assert validate(get_pkg_data_filename("data/empty_table.xml"), xmllint=True) def test_validate_path_object(): """Validating when source is passed as path object (#4412).""" test_validate(test_path_object=True) def test_gzip_filehandles(tmp_path): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 votable = parse(get_pkg_data_filename("data/regression.xml")) # W39: Bit values can not be masked with pytest.warns(W39): with open(tmp_path / "regression.compressed.xml", "wb") as fd: votable.to_xml(fd, compressed=True, _astropy_version="testing") with open(tmp_path / "regression.compressed.xml", "rb") as fd: votable = parse(fd) def test_from_scratch_example(): _run_test_from_scratch_example() def _run_test_from_scratch_example(): from astropy.io.votable.tree import Field, Resource, Table, VOTableFile # Create a new VOTable file... votable = VOTableFile() # ...with one resource... resource = Resource() votable.resources.append(resource) # ... with one table table = Table(votable) resource.tables.append(table) # Define some fields table.fields.extend( [ Field(votable, name="filename", datatype="char", arraysize="*"), Field(votable, name="matrix", datatype="double", arraysize="2x2"), ] ) # Now, use those field definitions to create the numpy record arrays, with # the given number of rows table.create_arrays(2) # Now table.array can be filled with data table.array[0] = ("test1.xml", [[1, 0], [0, 1]]) table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]]) assert table.array[0][0] == "test1.xml" def test_fileobj(): # Assert that what we get back is a raw C file pointer # so it will be super fast in the C extension. 
from astropy.utils.xml import iterparser filename = get_pkg_data_filename("data/regression.xml") with iterparser._convert_to_fd_or_read_function(filename) as fd: if sys.platform == "win32": fd() else: assert isinstance(fd, io.FileIO) def test_nonstandard_units(): from astropy import units as u votable = parse(get_pkg_data_filename("data/nonstandard_units.xml")) assert isinstance(votable.get_first_table().fields[0].unit, u.UnrecognizedUnit) votable = parse( get_pkg_data_filename("data/nonstandard_units.xml"), unit_format="generic" ) assert not isinstance(votable.get_first_table().fields[0].unit, u.UnrecognizedUnit) def test_resource_structure(): # Based on issue #1223, as reported by @astro-friedel and @RayPlante from astropy.io.votable import tree as vot vtf = vot.VOTableFile() r1 = vot.Resource() vtf.resources.append(r1) t1 = vot.Table(vtf) t1.name = "t1" t2 = vot.Table(vtf) t2.name = "t2" r1.tables.append(t1) r1.tables.append(t2) r2 = vot.Resource() vtf.resources.append(r2) t3 = vot.Table(vtf) t3.name = "t3" t4 = vot.Table(vtf) t4.name = "t4" r2.tables.append(t3) r2.tables.append(t4) r3 = vot.Resource() vtf.resources.append(r3) t5 = vot.Table(vtf) t5.name = "t5" t6 = vot.Table(vtf) t6.name = "t6" r3.tables.append(t5) r3.tables.append(t6) buff = io.BytesIO() vtf.to_xml(buff) buff.seek(0) vtf2 = parse(buff) assert len(vtf2.resources) == 3 for r in range(len(vtf2.resources)): res = vtf2.resources[r] assert len(res.tables) == 2 assert len(res.resources) == 0 def test_no_resource_check(): output = io.StringIO() # We can't test xmllint, because we can't rely on it being on the # user's machine. result = validate( get_pkg_data_filename("data/no_resource.xml"), output, xmllint=False ) assert result is False output.seek(0) output = output.readlines() # Uncomment to generate new groundtruth # with open('no_resource.txt', 'wt', encoding='utf-8') as fd: # fd.write(u''.join(output)) with open(get_pkg_data_filename("data/no_resource.txt"), encoding="utf-8") as fd: truth = fd.readlines() truth = truth[1:] output = output[1:-1] sys.stdout.writelines( difflib.unified_diff(truth, output, fromfile="truth", tofile="output") ) assert truth == output def test_instantiate_vowarning(): # This used to raise a deprecation exception. 
# See https://github.com/astropy/astroquery/pull/276 VOWarning(()) def test_custom_datatype(): votable = parse( get_pkg_data_filename("data/custom_datatype.xml"), datatype_mapping={"bar": "int"}, ) table = votable.get_first_table() assert table.array.dtype["foo"] == np.int32 def _timesys_tests(votable): assert len(list(votable.iter_timesys())) == 4 timesys = votable.get_timesys_by_id("time_frame") assert timesys.timeorigin == 2455197.5 assert timesys.timescale == "TCB" assert timesys.refposition == "BARYCENTER" timesys = votable.get_timesys_by_id("mjd_origin") assert timesys.timeorigin == "MJD-origin" assert timesys.timescale == "TDB" assert timesys.refposition == "EMBARYCENTER" timesys = votable.get_timesys_by_id("jd_origin") assert timesys.timeorigin == "JD-origin" assert timesys.timescale == "TT" assert timesys.refposition == "HELIOCENTER" timesys = votable.get_timesys_by_id("no_origin") assert timesys.timeorigin is None assert timesys.timescale == "UTC" assert timesys.refposition == "TOPOCENTER" def test_timesys(): votable = parse(get_pkg_data_filename("data/timesys.xml")) _timesys_tests(votable) def test_timesys_roundtrip(): orig_votable = parse(get_pkg_data_filename("data/timesys.xml")) bio = io.BytesIO() orig_votable.to_xml(bio) bio.seek(0) votable = parse(bio) _timesys_tests(votable) def test_timesys_errors(): output = io.StringIO() validate(get_pkg_data_filename("data/timesys_errors.xml"), output, xmllint=False) outstr = output.getvalue() assert "E23: Invalid timeorigin attribute 'bad-origin'" in outstr assert "E22: ID attribute is required for all TIMESYS elements" in outstr assert "W48: Unknown attribute 'refposition_mispelled' on TIMESYS" in outstr
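# Illustrative sketch, not part of the original test suite.
# A minimal, hedged example of the build-then-reparse pattern exercised by
# test_build_from_scratch and test_resource_structure above: construct a tiny
# VOTable in memory with the tree API, serialize it to a BytesIO buffer and
# parse it back.  Only calls already used in this file (tree.VOTableFile,
# tree.Resource, tree.Table, tree.Field, to_xml, parse) appear here; the
# field layout and values are made up for illustration.
def _example_roundtrip_in_memory():
    vtf = tree.VOTableFile()
    resource = tree.Resource()
    vtf.resources.append(resource)
    table = tree.Table(vtf)
    resource.tables.append(table)
    table.fields.extend(
        [
            tree.Field(vtf, name="ra", datatype="double"),
            tree.Field(vtf, name="dec", datatype="double"),
        ]
    )
    table.create_arrays(1)
    table.array[0] = (10.5, -42.0)

    buff = io.BytesIO()
    vtf.to_xml(buff)
    buff.seek(0)

    reparsed = parse(buff)
    assert len(reparsed.resources) == 1
    assert len(reparsed.get_first_table().array) == 1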
d2a0b4745db28ed33d00569fe309bd138cdbc1a1f048db5009fd24759aa41cbf
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the conversion to/from astropy.table. """ import io import os import pathlib import numpy as np import pytest from astropy.config import reload_config, set_temp_config from astropy.io.votable import conf, from_table, is_votable, tree, validate from astropy.io.votable.exceptions import E25, W39, VOWarning from astropy.io.votable.table import parse, writeto from astropy.table import Column, Table from astropy.table.table_helpers import simple_table from astropy.units import Unit from astropy.utils.data import ( get_pkg_data_filename, get_pkg_data_fileobj, get_pkg_data_path, ) from astropy.utils.exceptions import AstropyDeprecationWarning from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH @pytest.fixture def home_is_data(monkeypatch): """ Pytest fixture to run a test case with tilde-prefixed paths. In the tilde-path case, environment variables are temporarily modified so that '~' resolves to the data directory. """ path = get_pkg_data_path("data") # For Unix monkeypatch.setenv("HOME", path) # For Windows monkeypatch.setenv("USERPROFILE", path) @pytest.fixture def home_is_tmpdir(monkeypatch, tmp_path): """ Pytest fixture to run a test case with tilde-prefixed paths. In the tilde-path case, environment variables are temporarily modified so that '~' resolves to the temp directory. """ # For Unix monkeypatch.setenv("HOME", str(tmp_path)) # For Windows monkeypatch.setenv("USERPROFILE", str(tmp_path)) def test_table(tmp_path): # Read the VOTABLE with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 votable = parse(get_pkg_data_filename("data/regression.xml")) table = votable.get_first_table() astropy_table = table.to_table() for name in table.array.dtype.names: assert np.all(astropy_table.mask[name] == table.array.mask[name]) votable2 = tree.VOTableFile.from_table(astropy_table) t = votable2.get_first_table() field_types = [ ("string_test", {"datatype": "char", "arraysize": "*"}), ("string_test_2", {"datatype": "char", "arraysize": "10"}), ("unicode_test", {"datatype": "unicodeChar", "arraysize": "*"}), ("fixed_unicode_test", {"datatype": "unicodeChar", "arraysize": "10"}), ("string_array_test", {"datatype": "char", "arraysize": "4"}), ("unsignedByte", {"datatype": "unsignedByte"}), ("short", {"datatype": "short"}), ("int", {"datatype": "int"}), ("long", {"datatype": "long"}), ("double", {"datatype": "double"}), ("float", {"datatype": "float"}), ("array", {"datatype": "long", "arraysize": "2*"}), ("bit", {"datatype": "bit"}), ("bitarray", {"datatype": "bit", "arraysize": "3x2"}), ("bitvararray", {"datatype": "bit", "arraysize": "*"}), ("bitvararray2", {"datatype": "bit", "arraysize": "3x2*"}), ("floatComplex", {"datatype": "floatComplex"}), ("doubleComplex", {"datatype": "doubleComplex"}), ("doubleComplexArray", {"datatype": "doubleComplex", "arraysize": "*"}), ("doubleComplexArrayFixed", {"datatype": "doubleComplex", "arraysize": "2"}), ("boolean", {"datatype": "bit"}), ("booleanArray", {"datatype": "bit", "arraysize": "4"}), ("nulls", {"datatype": "int"}), ("nulls_array", {"datatype": "int", "arraysize": "2x2"}), ("precision1", {"datatype": "double"}), ("precision2", {"datatype": "double"}), ("doublearray", {"datatype": "double", "arraysize": "*"}), ("bitarray2", {"datatype": "bit", "arraysize": "16"}), ] for field, type in zip(t.fields, field_types): name, d = type assert field.ID == name assert ( field.datatype == d["datatype"] ), f'{name} expected {d["datatype"]} but get 
{field.datatype}' if "arraysize" in d: assert field.arraysize == d["arraysize"] # W39: Bit values can not be masked with pytest.warns(W39): writeto(votable2, str(tmp_path / "through_table.xml")) def test_read_from_tilde_path(home_is_data): # Just test that these run without error for tilde-paths path = os.path.join("~", "regression.xml") with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 votable = parse(path) Table.read(path, format="votable", table_id="main_table") def test_read_through_table_interface(tmp_path): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 with get_pkg_data_fileobj("data/regression.xml", encoding="binary") as fd: t = Table.read(fd, format="votable", table_id="main_table") assert len(t) == 5 # Issue 8354 assert t["float"].format is None fn = tmp_path / "table_interface.xml" # W39: Bit values can not be masked with pytest.warns(W39): t.write(fn, table_id="FOO", format="votable") with open(fn, "rb") as fd: t2 = Table.read(fd, format="votable", table_id="FOO") assert len(t2) == 5 def test_read_through_table_interface2(): with np.errstate(over="ignore"): # https://github.com/astropy/astropy/issues/13341 with get_pkg_data_fileobj("data/regression.xml", encoding="binary") as fd: t = Table.read(fd, format="votable", table_id="last_table") assert len(t) == 0 def test_pass_kwargs_through_table_interface(): # Table.read() should pass on keyword arguments meant for parse() filename = get_pkg_data_filename("data/nonstandard_units.xml") t = Table.read(filename, format="votable", unit_format="generic") assert t["Flux1"].unit == Unit("erg / (Angstrom cm2 s)") def test_names_over_ids(): with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd: votable = parse(fd) table = votable.get_first_table().to_table(use_names_over_ids=True) assert table.colnames == [ "Name", "GLON", "GLAT", "RAdeg", "DEdeg", "Jmag", "Hmag", "Kmag", "G3.6mag", "G4.5mag", "G5.8mag", "G8.0mag", "4.5mag", "8.0mag", "Emag", "24mag", "f_Name", ] def test_explicit_ids(): with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd: votable = parse(fd) table = votable.get_first_table().to_table(use_names_over_ids=False) assert table.colnames == [ "col1", "col2", "col3", "col4", "col5", "col6", "col7", "col8", "col9", "col10", "col11", "col12", "col13", "col14", "col15", "col16", "col17", ] def test_table_read_with_unnamed_tables(): """ Issue #927. """ with get_pkg_data_fileobj("data/names.xml", encoding="binary") as fd: t = Table.read(fd, format="votable") assert len(t) == 1 def test_votable_path_object(): """ Testing when votable is passed as pathlib.Path object #4412. 
""" fpath = pathlib.Path(get_pkg_data_filename("data/names.xml")) table = parse(fpath).get_first_table().to_table() assert len(table) == 1 assert int(table[0][3]) == 266 def test_from_table_without_mask(): t = Table() c = Column(data=[1, 2, 3], name="a") t.add_column(c) output = io.BytesIO() t.write(output, format="votable") def test_write_with_format(): t = Table() c = Column(data=[1, 2, 3], name="a") t.add_column(c) output = io.BytesIO() t.write(output, format="votable", tabledata_format="binary") obuff = output.getvalue() assert b'VOTABLE version="1.4"' in obuff assert b"BINARY" in obuff assert b"TABLEDATA" not in obuff output = io.BytesIO() t.write(output, format="votable", tabledata_format="binary2") obuff = output.getvalue() assert b'VOTABLE version="1.4"' in obuff assert b"BINARY2" in obuff assert b"TABLEDATA" not in obuff def test_write_overwrite(tmp_path): t = simple_table(3, 3) filename = tmp_path / "overwrite_test.vot" t.write(filename, format="votable") with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH): t.write(filename, format="votable") t.write(filename, format="votable", overwrite=True) def test_write_tilde_path(home_is_tmpdir): fname = os.path.join("~", "output") t = Table() t["a"] = [1, 2, 3] t.write(fname, format="votable", tabledata_format="binary") # Ensure the tilde-prefixed path wasn't treated literally assert not os.path.exists(fname) with open(os.path.expanduser(fname)) as f: obuff = f.read() assert 'VOTABLE version="1.4"' in obuff assert "BINARY" in obuff assert "TABLEDATA" not in obuff @pytest.mark.parametrize("path_format", ["plain", "tilde"]) def test_writeto(path_format, tmp_path, home_is_tmpdir): if path_format == "plain": # pathlib.Path objects are not accepted by votable.writeto, so convert # to a string fname = str(tmp_path / "writeto_test.vot") else: fname = os.path.join("~", "writeto_test.vot") t = Table() t["a"] = [1, 2, 3] vt = from_table(t) writeto(vt, fname) if path_format == "tilde": # Ensure the tilde-prefixed path wasn't treated literally assert not os.path.exists(fname) with open(os.path.expanduser(fname)) as f: obuff = f.read() assert 'VOTABLE version="1.4"' in obuff assert "BINARY" not in obuff assert "TABLEDATA" in obuff def test_empty_table(): votable = parse(get_pkg_data_filename("data/empty_table.xml")) table = votable.get_first_table() table.to_table() def test_no_field_not_empty_table(): votable = parse(get_pkg_data_filename("data/no_field_not_empty_table.xml")) table = votable.get_first_table() assert len(table.fields) == 0 assert len(table.infos) == 1 def test_no_field_not_empty_table_exception(): with pytest.raises(E25): parse( get_pkg_data_filename("data/no_field_not_empty_table.xml"), verify="exception", ) def test_binary2_masked_strings(): """ Issue #8995. """ # Read a VOTable which sets the null mask bit for each empty string value. votable = parse(get_pkg_data_filename("data/binary2_masked_strings.xml")) table = votable.get_first_table() astropy_table = table.to_table() # Ensure string columns have no masked values and can be written out assert not np.any(table.array.mask["epoch_photometry_url"]) output = io.BytesIO() astropy_table.write(output, format="votable") def test_validate_output_invalid(): """ Issue #12603. Test that we get the correct output from votable.validate with an invalid votable. 
""" # A votable with errors invalid_votable_filepath = get_pkg_data_filename("data/regression.xml") # When output is None, check that validate returns validation output as a string validate_out = validate(invalid_votable_filepath, output=None) assert isinstance(validate_out, str) # Check for known error string assert "E02: Incorrect number of elements in array." in validate_out # When output is not set, check that validate returns a bool validate_out = validate(invalid_votable_filepath) assert isinstance(validate_out, bool) # Check that validation output is correct (votable is not valid) assert validate_out is False def test_validate_output_valid(): """ Issue #12603. Test that we get the correct output from votable.validate with a valid votable. """ # A valid votable. (Example from the votable standard: # https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html ) valid_votable_filepath = get_pkg_data_filename("data/valid_votable.xml") # When output is None, check that validate returns validation output as a string validate_out = validate(valid_votable_filepath, output=None) assert isinstance(validate_out, str) # Check for known good output string assert "astropy.io.votable found no violations" in validate_out # When output is not set, check that validate returns a bool validate_out = validate(valid_votable_filepath) assert isinstance(validate_out, bool) # Check that validation output is correct (votable is valid) assert validate_out is True def test_validate_tilde_path(home_is_data): validate(os.path.join("~", "valid_votable.xml")) def test_is_votable_tilde_path(home_is_data): assert is_votable(os.path.join("~", "valid_votable.xml")) class TestVerifyOptions: # Start off by checking the default (ignore) def test_default(self): parse(get_pkg_data_filename("data/gemini.xml")) # Then try the various explicit options def test_verify_ignore(self): parse(get_pkg_data_filename("data/gemini.xml"), verify="ignore") def test_verify_warn(self): with pytest.warns(VOWarning) as w: parse(get_pkg_data_filename("data/gemini.xml"), verify="warn") assert len(w) == 24 def test_verify_exception(self): with pytest.raises(VOWarning): parse(get_pkg_data_filename("data/gemini.xml"), verify="exception") # Make sure the deprecated pedantic option still works for now def test_pedantic_false(self): with pytest.warns(VOWarning) as w: parse(get_pkg_data_filename("data/gemini.xml"), pedantic=False) assert len(w) == 25 def test_pedantic_true(self): with pytest.warns(AstropyDeprecationWarning): with pytest.raises(VOWarning): parse(get_pkg_data_filename("data/gemini.xml"), pedantic=True) # Make sure that the default behavior can be set via configuration items def test_conf_verify_ignore(self): with conf.set_temp("verify", "ignore"): parse(get_pkg_data_filename("data/gemini.xml")) def test_conf_verify_warn(self): with conf.set_temp("verify", "warn"): with pytest.warns(VOWarning) as w: parse(get_pkg_data_filename("data/gemini.xml")) assert len(w) == 24 def test_conf_verify_exception(self): with conf.set_temp("verify", "exception"): with pytest.raises(VOWarning): parse(get_pkg_data_filename("data/gemini.xml")) # And make sure the old configuration item will keep working def test_conf_pedantic_false(self, tmp_path): with set_temp_config(tmp_path): with open(tmp_path / "astropy" / "astropy.cfg", "w") as f: f.write("[io.votable]\npedantic = False") reload_config("astropy.io.votable") with pytest.warns(VOWarning) as w: parse(get_pkg_data_filename("data/gemini.xml")) assert len(w) == 25 def 
test_conf_pedantic_true(self, tmp_path): with set_temp_config(tmp_path): with open(tmp_path / "astropy" / "astropy.cfg", "w") as f: f.write("[io.votable]\npedantic = True") reload_config("astropy.io.votable") with pytest.warns(AstropyDeprecationWarning): with pytest.raises(VOWarning): parse(get_pkg_data_filename("data/gemini.xml"))
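# Illustrative sketch, not part of the original test suite.
# Hedged example of the Table <-> VOTable round trip that the tests above
# exercise: build a small astropy Table, convert it with from_table(), write
# it to an in-memory buffer with to_xml() and read it back with parse().
# The column names and values are made up for illustration.
def _example_table_roundtrip():
    t = Table()
    t["a"] = [1, 2, 3]
    t["b"] = ["x", "y", "z"]

    votable = from_table(t)
    buff = io.BytesIO()
    votable.to_xml(buff)
    buff.seek(0)

    t2 = parse(buff).get_first_table().to_table()
    assert t2.colnames == ["a", "b"]
    assert list(t2["a"]) == [1, 2, 3]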
f0b93ebc2a7372ea5b1b9309f7f53357b40487d302759389c0257e1e9fa668ee
# Licensed under a 3-clause BSD style license - see LICENSE.rst import io import pytest from astropy.io.votable import tree from astropy.io.votable.exceptions import W07, W08, W21, W41 from astropy.io.votable.table import parse from astropy.io.votable.tree import Resource, VOTableFile from astropy.utils.data import get_pkg_data_filename from astropy.utils.exceptions import AstropyDeprecationWarning def test_check_astroyear_fail(): config = {"verify": "exception"} field = tree.Field(None, name="astroyear", arraysize="1") with pytest.raises(W07): tree.check_astroyear("X2100", field, config) def test_string_fail(): config = {"verify": "exception"} with pytest.raises(W08): tree.check_string(42, "foo", config) def test_make_Fields(): votable = tree.VOTableFile() # ...with one resource... resource = tree.Resource() votable.resources.append(resource) # ... with one table table = tree.Table(votable) resource.tables.append(table) table.fields.extend( [tree.Field(votable, name="Test", datatype="float", unit="mag")] ) def test_unit_format(): data = parse(get_pkg_data_filename("data/irsa-nph-error.xml")) assert data._config["version"] == "1.0" assert tree._get_default_unit_format(data._config) == "cds" data = parse(get_pkg_data_filename("data/names.xml")) assert data._config["version"] == "1.1" assert tree._get_default_unit_format(data._config) == "cds" data = parse(get_pkg_data_filename("data/gemini.xml")) assert data._config["version"] == "1.2" assert tree._get_default_unit_format(data._config) == "cds" data = parse(get_pkg_data_filename("data/binary2_masked_strings.xml")) assert data._config["version"] == "1.3" assert tree._get_default_unit_format(data._config) == "cds" data = parse(get_pkg_data_filename("data/timesys.xml")) assert data._config["version"] == "1.4" assert tree._get_default_unit_format(data._config) == "vounit" def test_namespace_warning(): """ A version 1.4 VOTable must use the same namespace as 1.3. (see https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC16). """ bad_namespace = b"""<?xml version="1.0" encoding="utf-8"?> <VOTABLE version="1.4" xmlns="http://www.ivoa.net/xml/VOTable/v1.4" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <RESOURCE/> </VOTABLE> """ with pytest.warns(W41): parse(io.BytesIO(bad_namespace), verify="exception") good_namespace_14 = b"""<?xml version="1.0" encoding="utf-8"?> <VOTABLE version="1.4" xmlns="http://www.ivoa.net/xml/VOTable/v1.3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <RESOURCE/> </VOTABLE> """ parse(io.BytesIO(good_namespace_14), verify="exception") good_namespace_13 = b"""<?xml version="1.0" encoding="utf-8"?> <VOTABLE version="1.3" xmlns="http://www.ivoa.net/xml/VOTable/v1.3" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> <RESOURCE/> </VOTABLE> """ parse(io.BytesIO(good_namespace_13), verify="exception") def test_version(): """ VOTableFile.__init__ allows versions of '1.0', '1.1', '1.2', '1.3' and '1.4'. The '1.0' is curious since other checks in parse() and the version setter do not allow '1.0'. This test confirms that behavior for now. A future change may remove the '1.0'. """ # Exercise the checks in __init__ with pytest.warns(AstropyDeprecationWarning): VOTableFile(version="1.0") for version in ("1.1", "1.2", "1.3", "1.4"): VOTableFile(version=version) for version in ("0.9", "2.0"): with pytest.raises( ValueError, match=r"should be in \('1.0', '1.1', '1.2', '1.3', '1.4'\)." 
): VOTableFile(version=version) # Exercise the checks in the setter vot = VOTableFile() for version in ("1.1", "1.2", "1.3", "1.4"): vot.version = version for version in ("1.0", "2.0"): with pytest.raises( ValueError, match=r"supports VOTable versions '1.1', '1.2', '1.3', '1.4'$" ): vot.version = version # Exercise the checks in the parser. begin = b'<?xml version="1.0" encoding="utf-8"?><VOTABLE version="' middle = b'" xmlns="http://www.ivoa.net/xml/VOTable/v' end = ( b'" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><RESOURCE/></VOTABLE>' ) # Valid versions for bversion in (b"1.1", b"1.2", b"1.3"): parse( io.BytesIO(begin + bversion + middle + bversion + end), verify="exception" ) parse(io.BytesIO(begin + b"1.4" + middle + b"1.3" + end), verify="exception") # Invalid versions for bversion in (b"1.0", b"2.0"): with pytest.warns(W21): parse( io.BytesIO(begin + bversion + middle + bversion + end), verify="exception", ) def votable_xml_string(version): votable_file = VOTableFile(version=version) votable_file.resources.append(Resource()) xml_bytes = io.BytesIO() votable_file.to_xml(xml_bytes) xml_bytes.seek(0) bstring = xml_bytes.read() s = bstring.decode("utf-8") return s def test_votable_tag(): xml = votable_xml_string("1.1") assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.1"' in xml assert 'xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.1"' in xml xml = votable_xml_string("1.2") assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.2"' in xml assert 'xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.2"' in xml xml = votable_xml_string("1.3") assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml assert 'xsi:schemaLocation="http://www.ivoa.net/xml/VOTable/v1.3 ' assert 'http://www.ivoa.net/xml/VOTable/VOTable-1.3.xsd"' in xml xml = votable_xml_string("1.4") assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml assert 'xsi:schemaLocation="http://www.ivoa.net/xml/VOTable/v1.3 ' assert 'http://www.ivoa.net/xml/VOTable/VOTable-1.4.xsd"' in xml
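# Illustrative sketch, not part of the original test suite.
# Hedged example tying together test_namespace_warning and test_votable_tag
# above: a file serialized as VOTable 1.4 still declares the 1.3 XML
# namespace, which is exactly the rule the namespace check enforces.  It
# reuses the votable_xml_string() helper defined in this file.
def _example_version_14_uses_13_namespace():
    xml = votable_xml_string("1.4")
    assert 'version="1.4"' in xml
    assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml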
7e59cc8c083f4de6e52d787751e493806feb1938382b49e744bfebac1bebcc65
# Licensed under a 3-clause BSD style license - see LICENSE.rst import io # THIRD-PARTY import numpy as np import pytest from numpy.testing import assert_array_equal # LOCAL from astropy.io.votable import converters, exceptions, tree from astropy.io.votable.table import parse_single_table from astropy.utils.data import get_pkg_data_filename def test_invalid_arraysize(): with pytest.raises(exceptions.E13): field = tree.Field(None, name="broken", datatype="char", arraysize="foo") converters.get_converter(field) def test_oversize_char(): config = {"verify": "exception"} with pytest.warns(exceptions.W47) as w: field = tree.Field(None, name="c", datatype="char", config=config) c = converters.get_converter(field, config=config) assert len(w) == 1 with pytest.warns(exceptions.W46) as w: c.parse("XXX") assert len(w) == 1 def test_char_mask(): config = {"verify": "exception"} field = tree.Field(None, name="c", arraysize="1", datatype="char", config=config) c = converters.get_converter(field, config=config) assert c.output("Foo", True) == "" def test_oversize_unicode(): config = {"verify": "exception"} with pytest.warns(exceptions.W46) as w: field = tree.Field( None, name="c2", datatype="unicodeChar", arraysize="1", config=config ) c = converters.get_converter(field, config=config) c.parse("XXX") assert len(w) == 1 def test_unicode_mask(): config = {"verify": "exception"} field = tree.Field( None, name="c", arraysize="1", datatype="unicodeChar", config=config ) c = converters.get_converter(field, config=config) assert c.output("Foo", True) == "" def test_unicode_as_char(): config = {"verify": "exception"} field = tree.Field( None, name="unicode_in_char", datatype="char", arraysize="*", config=config ) c = converters.get_converter(field, config=config) # Test parsing. c.parse("XYZ") # ASCII succeeds with pytest.warns( exceptions.W55, match=( r'FIELD \(unicode_in_char\) has datatype="char" but contains non-ASCII' r" value" ), ): c.parse("zła") # non-ASCII # Test output. 
c.output("XYZ", False) # ASCII str succeeds c.output(b"XYZ", False) # ASCII bytes succeeds value = "zła" value_bytes = value.encode("utf-8") with pytest.warns(exceptions.E24, match=r"E24: Attempt to write non-ASCII value"): c.output(value, False) # non-ASCII str raises with pytest.warns(exceptions.E24, match=r"E24: Attempt to write non-ASCII value"): c.output(value_bytes, False) # non-ASCII bytes raises def test_unicode_as_char_binary(): config = {"verify": "exception"} field = tree.Field( None, name="unicode_in_char", datatype="char", arraysize="*", config=config ) c = converters.get_converter(field, config=config) c._binoutput_var("abc", False) # ASCII succeeds with pytest.raises(exceptions.E24, match=r"E24: Attempt to write non-ASCII value"): c._binoutput_var("zła", False) field = tree.Field( None, name="unicode_in_char", datatype="char", arraysize="3", config=config ) c = converters.get_converter(field, config=config) c._binoutput_fixed("xyz", False) with pytest.raises(exceptions.E24, match=r"E24: Attempt to write non-ASCII value"): c._binoutput_fixed("zła", False) def test_wrong_number_of_elements(): config = {"verify": "exception"} field = tree.Field(None, name="c", datatype="int", arraysize="2x3*", config=config) c = converters.get_converter(field, config=config) with pytest.raises(exceptions.E02): c.parse("2 3 4 5 6") def test_float_mask(): config = {"verify": "exception"} field = tree.Field(None, name="c", datatype="float", config=config) c = converters.get_converter(field, config=config) assert c.parse("") == (c.null, True) with pytest.raises(ValueError): c.parse("null") def test_float_mask_permissive(): config = {"verify": "ignore"} field = tree.Field(None, name="c", datatype="float", config=config) # config needs to be also passed into parse() to work. 
# https://github.com/astropy/astropy/issues/8775 c = converters.get_converter(field, config=config) assert c.parse("null", config=config) == (c.null, True) def test_double_array(): config = {"verify": "exception", "version_1_3_or_later": True} field = tree.Field(None, name="c", datatype="double", arraysize="3", config=config) data = (1.0, 2.0, 3.0) c = converters.get_converter(field, config=config) assert c.output(1.0, False) == "1" assert c.output(1.0, [False, False]) == "1" assert c.output(data, False) == "1 2 3" assert c.output(data, [False, False, False]) == "1 2 3" assert c.output(data, [False, False, True]) == "1 2 NaN" assert c.output(data, [False, False]) == "1 2" a = c.parse("1 2 3", config=config) assert_array_equal(a[0], data) assert_array_equal(a[1], False) with pytest.raises(exceptions.E02): c.parse("1", config=config) with pytest.raises(AttributeError), pytest.warns(exceptions.E02): c.parse("1") with pytest.raises(exceptions.E02): c.parse("2 3 4 5 6", config=config) with pytest.warns(exceptions.E02): a = c.parse("2 3 4 5 6") assert_array_equal(a[0], [2, 3, 4]) assert_array_equal(a[1], False) def test_complex_array_vararray(): config = {"verify": "exception"} field = tree.Field( None, name="c", datatype="floatComplex", arraysize="2x3*", config=config ) c = converters.get_converter(field, config=config) with pytest.raises(exceptions.E02): c.parse("2 3 4 5 6") def test_complex_array_vararray2(): config = {"verify": "exception"} field = tree.Field( None, name="c", datatype="floatComplex", arraysize="2x3*", config=config ) c = converters.get_converter(field, config=config) x = c.parse("") assert len(x[0]) == 0 def test_complex_array_vararray3(): config = {"verify": "exception"} field = tree.Field( None, name="c", datatype="doubleComplex", arraysize="2x3*", config=config ) c = converters.get_converter(field, config=config) x = c.parse("1 2 3 4 5 6 7 8 9 10 11 12") assert len(x) == 2 assert np.all(x[0][0][0] == complex(1, 2)) def test_complex_vararray(): config = {"verify": "exception"} field = tree.Field( None, name="c", datatype="doubleComplex", arraysize="*", config=config ) c = converters.get_converter(field, config=config) x = c.parse("1 2 3 4") assert len(x) == 2 assert x[0][0] == complex(1, 2) def test_complex(): config = {"verify": "exception"} field = tree.Field(None, name="c", datatype="doubleComplex", config=config) c = converters.get_converter(field, config=config) with pytest.raises(exceptions.E03): c.parse("1 2 3") def test_bit(): config = {"verify": "exception"} field = tree.Field(None, name="c", datatype="bit", config=config) c = converters.get_converter(field, config=config) with pytest.raises(exceptions.E04): c.parse("T") def test_bit_mask(): config = {"verify": "exception"} with pytest.warns(exceptions.W39) as w: field = tree.Field(None, name="c", datatype="bit", config=config) c = converters.get_converter(field, config=config) c.output(True, True) assert len(w) == 1 def test_boolean(): config = {"verify": "exception"} field = tree.Field(None, name="c", datatype="boolean", config=config) c = converters.get_converter(field, config=config) with pytest.raises(exceptions.E05): c.parse("YES") def test_boolean_array(): config = {"verify": "exception"} field = tree.Field(None, name="c", datatype="boolean", arraysize="*", config=config) c = converters.get_converter(field, config=config) r, mask = c.parse("TRUE FALSE T F 0 1") assert_array_equal(r, [True, False, True, False, False, True]) def test_invalid_type(): config = {"verify": "exception"} with 
pytest.raises(exceptions.E06): field = tree.Field(None, name="c", datatype="foobar", config=config) converters.get_converter(field, config=config) def test_precision(): config = {"verify": "exception"} field = tree.Field(None, name="c", datatype="float", precision="E4", config=config) c = converters.get_converter(field, config=config) assert c.output(266.248, False) == "266.2" field = tree.Field(None, name="c", datatype="float", precision="F4", config=config) c = converters.get_converter(field, config=config) assert c.output(266.248, False) == "266.2480" def test_integer_overflow(): config = {"verify": "exception"} field = tree.Field(None, name="c", datatype="int", config=config) c = converters.get_converter(field, config=config) with pytest.raises(exceptions.W51): c.parse("-2208988800", config=config) def test_float_default_precision(): config = {"verify": "exception"} field = tree.Field(None, name="c", datatype="float", arraysize="4", config=config) c = converters.get_converter(field, config=config) assert ( c.output([1, 2, 3, 8.9990234375], [False, False, False, False]) == "1 2 3 8.9990234375" ) def test_vararray(): votable = tree.VOTableFile() resource = tree.Resource() votable.resources.append(resource) table = tree.Table(votable) resource.tables.append(table) tabarr = [] heads = ["headA", "headB", "headC"] types = ["char", "double", "int"] vals = [["A", 1.0, 2], ["B", 2.0, 3], ["C", 3.0, 4]] for i in range(len(heads)): tabarr.append( tree.Field(votable, name=heads[i], datatype=types[i], arraysize="*") ) table.fields.extend(tabarr) table.create_arrays(len(vals)) for i in range(len(vals)): values = tuple(vals[i]) table.array[i] = values buff = io.BytesIO() votable.to_xml(buff) def test_gemini_v1_2(): """ see Pull Request 4782 or Issue 4781 for details. """ table = parse_single_table(get_pkg_data_filename("data/gemini.xml")) assert table is not None tt = table.to_table() assert ( tt["access_url"][0] == "http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/data/pub/GEMINI/" "S20120515S0064?runid=bx9b1o8cvk1qesrt" )
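# Illustrative sketch, not part of the original test suite.
# Hedged example of the converter round trip used throughout this file:
# describe a FIELD, look up its converter with converters.get_converter(),
# parse a string value into (value, mask) and format it back out with
# output().  The field name and value are made up for illustration, and the
# exact text returned by output() is deliberately not asserted.
def _example_converter_roundtrip():
    config = {"verify": "exception"}
    field = tree.Field(None, name="x", datatype="double", config=config)
    c = converters.get_converter(field, config=config)

    value, mask = c.parse("2.5", config=config)
    assert value == 2.5
    assert not mask

    text = c.output(value, mask)
    assert float(text) == 2.5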
32f1caebf640a5968ec08cca8fc6ad5579a4ddc54e2b6892bcbd904436830d63
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB import contextlib import os import re from math import ceil from astropy import online_docs_root from astropy.io.votable import exceptions from astropy.utils.xml.writer import XMLWriter, xml_escape html_header = """<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.0//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic10.dtd"> """ default_style = """ body { font-family: sans-serif } a { text-decoration: none } .highlight { color: red; font-weight: bold; text-decoration: underline; } .green { background-color: #ddffdd } .red { background-color: #ffdddd } .yellow { background-color: #ffffdd } tr:hover { background-color: #dddddd } table { border-width: 1px; border-spacing: 0px; border-style: solid; border-color: gray; border-collapse: collapse; background-color: white; padding: 5px; } table th { border-width: 1px; padding: 5px; border-style: solid; border-color: gray; } table td { border-width: 1px; padding: 5px; border-style: solid; border-color: gray; } """ @contextlib.contextmanager def make_html_header(w): w.write(html_header) with w.tag("html", xmlns="http://www.w3.org/1999/xhtml", lang="en-US"): with w.tag("head"): w.element("title", "VO Validation results") w.element("style", default_style) with w.tag("body"): yield def write_source_line(w, line, nchar=0): part1 = xml_escape(line[:nchar].decode("utf-8")) char = xml_escape(line[nchar : nchar + 1].decode("utf-8")) part2 = xml_escape(line[nchar + 1 :].decode("utf-8")) w.write(" ") w.write(part1) w.write(f'<span class="highlight">{char}</span>') w.write(part2) w.write("\n\n") def write_warning(w, line, xml_lines): warning = exceptions.parse_vowarning(line) if not warning["is_something"]: w.data(line) else: w.write(f"Line {warning['nline']:d}: ") if warning["warning"]: w.write( '<a href="{}/{}">{}</a>: '.format( online_docs_root, warning["doc_url"], warning["warning"] ) ) msg = warning["message"] if not isinstance(warning["message"], str): msg = msg.decode("utf-8") w.write(xml_escape(msg)) w.write("\n") if 1 <= warning["nline"] < len(xml_lines): write_source_line(w, xml_lines[warning["nline"] - 1], warning["nchar"]) def write_votlint_warning(w, line, xml_lines): match = re.search( r"(WARNING|ERROR|INFO) \(l.(?P<line>[0-9]+), c.(?P<column>[0-9]+)\):" r" (?P<rest>.*)", line, ) if match: w.write(f"Line {int(match.group('line'))}: {xml_escape(match.group('rest'))}\n") write_source_line( w, xml_lines[int(match.group("line")) - 1], int(match.group("column")) - 1 ) else: w.data(line) w.data("\n") def write_result(result): if "network_error" in result and result["network_error"] is not None: return xml = result.get_xml_content() xml_lines = xml.splitlines() path = os.path.join(result.get_dirpath(), "index.html") with open(path, "w", encoding="utf-8") as fd: w = XMLWriter(fd) with make_html_header(w): with w.tag("p"): with w.tag("a", href="vo.xml"): w.data(result.url.decode("ascii")) w.element("hr") with w.tag("pre"): w._flush() for line in result["warnings"]: write_warning(w, line, xml_lines) if result["xmllint"] is False: w.element("hr") w.element("p", "xmllint results:") content = result["xmllint_content"] if not isinstance(content, str): content = content.decode("ascii") content = content.replace(result.get_dirpath() + "/", "") with w.tag("pre"): w.data(content) if "votlint" in result: if result["votlint"] is False: w.element("hr") w.element("p", "votlint results:") content = result["votlint_content"] if not isinstance(content, str): 
content = content.decode("ascii") with w.tag("pre"): w._flush() for line in content.splitlines(): write_votlint_warning(w, line, xml_lines) def write_result_row(w, result): with w.tag("tr"): with w.tag("td"): if "network_error" in result and result["network_error"] is not None: w.data(result.url.decode("ascii")) else: w.element( "a", result.url.decode("ascii"), href=f"{result.get_htmlpath()}/index.html", ) if "network_error" in result and result["network_error"] is not None: w.element("td", str(result["network_error"]), attrib={"class": "red"}) w.element("td", "-") w.element("td", "-") w.element("td", "-") w.element("td", "-") else: w.element("td", "-", attrib={"class": "green"}) if result["nexceptions"]: cls = "red" msg = "Fatal" elif result["nwarnings"]: cls = "yellow" msg = str(result["nwarnings"]) else: cls = "green" msg = "-" w.element("td", msg, attrib={"class": cls}) msg = result["version"] if result["xmllint"] is None: cls = "" elif result["xmllint"] is False: cls = "red" else: cls = "green" w.element("td", msg, attrib={"class": cls}) if result["expected"] == "good": cls = "green" msg = "-" elif result["expected"] == "broken": cls = "red" msg = "net" elif result["expected"] == "incorrect": cls = "yellow" msg = "invalid" w.element("td", msg, attrib={"class": cls}) if "votlint" in result: if result["votlint"]: cls = "green" msg = "Passed" else: cls = "red" msg = "Failed" else: cls = "" msg = "?" w.element("td", msg, attrib={"class": cls}) def write_table(basename, name, results, root="results", chunk_size=500): def write_page_links(j): if npages <= 1: return with w.tag("center"): if j > 0: w.element("a", "<< ", href=f"{basename}_{j - 1:02d}.html") for i in range(npages): if i == j: w.data(str(i + 1)) else: w.element("a", str(i + 1), href=f"{basename}_{i:02d}.html") w.data(" ") if j < npages - 1: w.element("a", ">>", href=f"{basename}_{j + 1:02d}.html") npages = int(ceil(float(len(results)) / chunk_size)) for i, j in enumerate(range(0, max(len(results), 1), chunk_size)): subresults = results[j : j + chunk_size] path = os.path.join(root, f"{basename}_{i:02d}.html") with open(path, "w", encoding="utf-8") as fd: w = XMLWriter(fd) with make_html_header(w): write_page_links(i) w.element("h2", name) with w.tag("table"): with w.tag("tr"): w.element("th", "URL") w.element("th", "Network") w.element("th", "Warnings") w.element("th", "Schema") w.element("th", "Expected") w.element("th", "votlint") for result in subresults: write_result_row(w, result) write_page_links(i) def add_subset(w, basename, name, subresults, inside=["p"], total=None): with w.tag("tr"): subresults = list(subresults) if total is None: total = len(subresults) if total == 0: # pragma: no cover percentage = 0.0 else: percentage = float(len(subresults)) / total with w.tag("td"): for element in inside: w.start(element) w.element("a", name, href=f"{basename}_00.html") for element in reversed(inside): w.end(element) numbers = f"{len(subresults):d} ({percentage:.2%})" with w.tag("td"): w.data(numbers) def write_index(subsets, results, root="results"): path = os.path.join(root, "index.html") with open(path, "w", encoding="utf-8") as fd: w = XMLWriter(fd) with make_html_header(w): w.element("h1", "VO Validation results") with w.tag("table"): for subset in subsets: add_subset(w, *subset, total=len(results)) def write_index_table( root, basename, name, subresults, inside=None, total=None, chunk_size=500 ): if total is None: total = len(subresults) percentage = float(len(subresults)) / total numbers = f"{len(subresults):d} 
({percentage:.2%})" write_table(basename, name + " " + numbers, subresults, root, chunk_size)
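# Illustrative sketch, not part of the original module.
# Hedged example of how the XMLWriter helpers above combine: open a writer
# over any text-mode, writable file object, emit the common report skeleton
# with make_html_header() and add one table row.  The cell contents are made
# up for illustration.
def _example_write_minimal_report(fd):
    w = XMLWriter(fd)
    with make_html_header(w):
        w.element("h1", "Example validation report")
        with w.tag("table"):
            with w.tag("tr"):
                w.element("th", "URL")
                w.element("th", "Warnings")
            with w.tag("tr"):
                w.element("td", "http://example.org/vo.xml")
                w.element("td", "-")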
5d67ebf834036042f33e264abcb7614d9bd39c027508d3b86562b523bc42ce46
# Licensed under a 3-clause BSD style license - see LICENSE.rst from functools import wraps import pytest from astropy.utils.compat.optional_deps import HAS_PYTEST_MPL def figure_test(*args, **kwargs): """ A decorator that defines a figure test. This automatically decorates tests with mpl_image_compare with common options used by all figure tests in astropy, and also adds the decorator to allow remote data to be accessed. """ # NOTE: the savefig_kwargs option below is to avoid using PNG files with # the matplotlib version embedded since this changes for every developer # version. tolerance = kwargs.pop("tolerance", 0) style = kwargs.pop("style", {}) savefig_kwargs = kwargs.pop("savefig_kwargs", {}) savefig_kwargs["metadata"] = {"Software": None} def decorator(test_function): @pytest.mark.remote_data @pytest.mark.mpl_image_compare( tolerance=tolerance, style=style, savefig_kwargs=savefig_kwargs, **kwargs ) @pytest.mark.skipif( not HAS_PYTEST_MPL, reason="pytest-mpl is required for the figure tests" ) @wraps(test_function) def test_wrapper(*args, **kwargs): return test_function(*args, **kwargs) return test_wrapper # If the decorator was used without any arguments, the only positional # argument will be the test to decorate so we do the following: if len(args) == 1: return decorator(*args) return decorator
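# Illustrative sketch, not part of the original module.
# Hedged example of how a figure test is typically written with the decorator
# above: the decorated test builds a matplotlib figure and returns it, so
# that pytest-mpl can compare it against a reference image.  The plotted data
# are made up for illustration and matplotlib is assumed to be installed.
def _example_figure_test_usage():
    @figure_test
    def simple_plot():
        import matplotlib.pyplot as plt

        fig, ax = plt.subplots()
        ax.plot([1, 2, 3], [1, 4, 9])
        return fig

    return simple_plot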
51e584d1853e3faf879161f082f312e28b78c83ebf3acec41028978ff366917b
import abc import numpy as np from astropy.timeseries.binned import BinnedTimeSeries from astropy.timeseries.sampled import TimeSeries __all__ = ["BasePeriodogram"] class BasePeriodogram: @abc.abstractmethod def __init__(self, t, y, dy=None): pass @classmethod def from_timeseries( cls, timeseries, signal_column_name=None, uncertainty=None, **kwargs ): """ Initialize a periodogram from a time series object. If a binned time series is passed, the time at the center of the bins is used. Also note that this method automatically gets rid of NaN/undefined values when initializing the periodogram. Parameters ---------- signal_column_name : str The name of the column containing the signal values to use. uncertainty : str or float or `~astropy.units.Quantity`, optional The name of the column containing the errors on the signal, or the value to use for the error, if a scalar. **kwargs Additional keyword arguments are passed to the initializer for this periodogram class. """ if signal_column_name is None: raise ValueError("signal_column_name should be set to a valid column name") y = timeseries[signal_column_name] keep = ~np.isnan(y) if isinstance(uncertainty, str): dy = timeseries[uncertainty] keep &= ~np.isnan(dy) dy = dy[keep] else: dy = uncertainty if isinstance(timeseries, TimeSeries): time = timeseries.time elif isinstance(timeseries, BinnedTimeSeries): time = timeseries.time_bin_center else: raise TypeError( "Input time series should be an instance of " "TimeSeries or BinnedTimeSeries" ) return cls(time[keep], y[keep], dy=dy, **kwargs)
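# Illustrative sketch, not part of the original module.
# Hedged usage example for from_timeseries() with a concrete subclass.  It
# assumes astropy.timeseries.LombScargle (which derives from BasePeriodogram)
# and builds a small sampled TimeSeries with a made-up "flux" column; the NaN
# row is dropped automatically before the periodogram is constructed.
def _example_from_timeseries():
    from astropy import units as u
    from astropy.timeseries import LombScargle

    flux = np.array([1.0, 1.2, np.nan, 0.9, 1.1, 1.0, 1.3, 0.95, 1.05, 1.0])
    ts = TimeSeries(
        time_start="2020-01-01T00:00:00",
        time_delta=1 * u.hour,
        data={"flux": flux * u.mJy},
    )
    periodogram = LombScargle.from_timeseries(ts, signal_column_name="flux")
    frequency, power = periodogram.autopower()
    return frequency, power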
ee917013cc14f9edc90a81b0819f315943a3e8aa17527919f561df299d80dea7
# Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings import numpy as np from astropy.io import fits, registry from astropy.table import MaskedColumn, Table from astropy.time import Time, TimeDelta from astropy.timeseries.sampled import TimeSeries __all__ = ["kepler_fits_reader"] def kepler_fits_reader(filename, unit_parse_strict="warn"): """ This serves as the FITS reader for KEPLER or TESS files within astropy-timeseries. This function should generally not be called directly, and instead this time series reader should be accessed with the :meth:`~astropy.timeseries.TimeSeries.read` method:: >>> from astropy.timeseries import TimeSeries >>> ts = TimeSeries.read('kplr33122.fits', format='kepler.fits') # doctest: +SKIP Parameters ---------- filename : `str` or `pathlib.Path` File to load. unit_parse_strict : str, optional Behaviour when encountering invalid column units in the FITS header. Default is "warn", which will emit a ``UnitsWarning`` and create a :class:`~astropy.units.core.UnrecognizedUnit`. Values are the ones allowed by the ``parse_strict`` argument of :class:`~astropy.units.core.Unit`: ``raise``, ``warn`` and ``silent``. Returns ------- ts : `~astropy.timeseries.TimeSeries` Data converted into a TimeSeries. """ hdulist = fits.open(filename) # Get the lightcurve HDU telescope = hdulist[0].header["telescop"].lower() if telescope == "tess": hdu = hdulist["LIGHTCURVE"] elif telescope == "kepler": hdu = hdulist[1] else: raise NotImplementedError( f"{hdulist[0].header['telescop']} is not implemented, only KEPLER or TESS" " are supported through this reader" ) if hdu.header["EXTVER"] > 1: raise NotImplementedError( f"Support for {hdu.header['TELESCOP']} v{hdu.header['EXTVER']} files not" " yet implemented" ) # Check time scale if hdu.header["TIMESYS"] != "TDB": raise NotImplementedError( f"Support for {hdu.header['TIMESYS']} time scale not yet implemented in" f" {hdu.header['TELESCOP']} reader" ) tab = Table.read(hdu, format="fits", unit_parse_strict=unit_parse_strict) # Some KEPLER files have a T column instead of TIME. if "T" in tab.colnames: tab.rename_column("T", "TIME") for colname in tab.colnames: unit = tab[colname].unit # Make masks nan for any column which will turn into a Quantity # later. TODO: remove once we support Masked Quantities properly? if unit and isinstance(tab[colname], MaskedColumn): tab[colname] = tab[colname].filled(np.nan) # Fix units if unit == "e-/s": tab[colname].unit = "electron/s" if unit == "pixels": tab[colname].unit = "pixel" # Rename columns to lowercase tab.rename_column(colname, colname.lower()) # Filter out NaN rows nans = np.isnan(tab["time"].data) if np.any(nans): warnings.warn(f"Ignoring {np.sum(nans)} rows with NaN times") tab = tab[~nans] # Time column is dependent on source and we correct it here reference_date = Time( hdu.header["BJDREFI"], hdu.header["BJDREFF"], scale=hdu.header["TIMESYS"].lower(), format="jd", ) time = reference_date + TimeDelta(tab["time"].data, format="jd") time.format = "isot" # Remove original time column tab.remove_column("time") hdulist.close() return TimeSeries(time=time, data=tab) registry.register_reader("kepler.fits", TimeSeries, kepler_fits_reader) registry.register_reader("tess.fits", TimeSeries, kepler_fits_reader)
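# Illustrative sketch, not part of the original module.
# Hedged usage example: once the "kepler.fits"/"tess.fits" readers above are
# registered, a light-curve file can be loaded through the unified TimeSeries
# I/O interface, as in the docstring.  The file name and the column name
# below are placeholders for a real Kepler product.
#
#     from astropy.timeseries import TimeSeries
#     ts = TimeSeries.read("kplr33122.fits", format="kepler.fits",
#                          unit_parse_strict="silent")
#     ts.time           # BJD(TDB) times rebuilt from BJDREFI/BJDREFF
#     ts["sap_flux"]    # column names are lower-cased by the reader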
# Licensed under a 3-clause BSD style license - see LICENSE.rst

__all__ = ["bls_fast", "bls_slow"]

from functools import partial

import numpy as np

from ._impl import bls_impl


def bls_slow(t, y, ivar, period, duration, oversample, use_likelihood):
    """Compute the periodogram using a brute force reference method.

    Parameters
    ----------
    t : array-like
        Sequence of observation times.
    y : array-like
        Sequence of observations associated with times t.
    ivar : array-like
        The inverse variance of ``y``.
    period : array-like
        The trial periods where the periodogram should be computed.
    duration : array-like
        The durations that should be tested.
    oversample : int
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
        If true, maximize the log likelihood over phase, duration, and depth.

    Returns
    -------
    power : array-like
        The periodogram evaluated at the periods in ``period``.
    depth : array-like
        The estimated depth of the maximum power model at each period.
    depth_err : array-like
        The 1-sigma uncertainty on ``depth``.
    duration : array-like
        The maximum power duration at each period.
    transit_time : array-like
        The maximum power phase of the transit in units of time. This
        indicates the mid-transit time and it will always be in the range
        (0, period).
    depth_snr : array-like
        The signal-to-noise with which the depth is measured at maximum power.
    log_likelihood : array-like
        The log likelihood of the maximum power model.

    """
    f = partial(_bls_slow_one, t, y, ivar, duration, oversample, use_likelihood)
    return _apply(f, period)


def bls_fast(t, y, ivar, period, duration, oversample, use_likelihood):
    """Compute the periodogram using an optimized Cython implementation.

    Parameters
    ----------
    t : array-like
        Sequence of observation times.
    y : array-like
        Sequence of observations associated with times t.
    ivar : array-like
        The inverse variance of ``y``.
    period : array-like
        The trial periods where the periodogram should be computed.
    duration : array-like
        The durations that should be tested.
    oversample : int
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
        If true, maximize the log likelihood over phase, duration, and depth.

    Returns
    -------
    power : array-like
        The periodogram evaluated at the periods in ``period``.
    depth : array-like
        The estimated depth of the maximum power model at each period.
    depth_err : array-like
        The 1-sigma uncertainty on ``depth``.
    duration : array-like
        The maximum power duration at each period.
    transit_time : array-like
        The maximum power phase of the transit in units of time. This
        indicates the mid-transit time and it will always be in the range
        (0, period).
    depth_snr : array-like
        The signal-to-noise with which the depth is measured at maximum power.
    log_likelihood : array-like
        The log likelihood of the maximum power model.

    """
    return bls_impl(t, y, ivar, period, duration, oversample, use_likelihood)


def _bls_slow_one(t, y, ivar, duration, oversample, use_likelihood, period):
    """A private function to compute the brute force periodogram result."""
    best = (-np.inf, None)
    hp = 0.5 * period
    min_t = np.min(t)
    for dur in duration:
        # Compute the phase grid (this is set by the duration and oversample).
        d_phase = dur / oversample
        phase = np.arange(0, period + d_phase, d_phase)

        for t0 in phase:
            # Figure out which data points are in and out of transit.
            m_in = np.abs((t - min_t - t0 + hp) % period - hp) < 0.5 * dur
            m_out = ~m_in

            # Compute the estimates of the in and out-of-transit flux.
ivar_in = np.sum(ivar[m_in]) ivar_out = np.sum(ivar[m_out]) y_in = np.sum(y[m_in] * ivar[m_in]) / ivar_in y_out = np.sum(y[m_out] * ivar[m_out]) / ivar_out # Use this to compute the best fit depth and uncertainty. depth = y_out - y_in depth_err = np.sqrt(1.0 / ivar_in + 1.0 / ivar_out) snr = depth / depth_err # Compute the log likelihood of this model. loglike = -0.5 * np.sum((y_in - y[m_in]) ** 2 * ivar[m_in]) loglike += 0.5 * np.sum((y_out - y[m_in]) ** 2 * ivar[m_in]) # Choose which objective should be used for the optimization. if use_likelihood: objective = loglike else: objective = snr # If this model is better than any before, keep it. if depth > 0 and objective > best[0]: best = ( objective, ( objective, depth, depth_err, dur, (t0 + min_t) % period, snr, loglike, ), ) return best[1] def _apply(f, period): return tuple(map(np.array, zip(*map(f, period))))
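# Hedged sketch of the core box-model step in ``_bls_slow_one`` above:
# phase-fold the times at a trial period, split the points into in-transit
# and out-of-transit sets, and estimate the transit depth from the
# inverse-variance weighted means.  The trial period, duration, phase and
# simulated data are assumed values chosen only for illustration.

import numpy as np

rng = np.random.default_rng(1)
t = np.sort(rng.uniform(0, 20, 500))
period, duration, t0 = 3.0, 0.2, 1.0
sigma = 0.01
ivar = np.full(t.shape, 1.0 / sigma**2)

# In/out-of-transit masks, phase-folded relative to the first observation.
hp = 0.5 * period
m_in = np.abs((t - np.min(t) - t0 + hp) % period - hp) < 0.5 * duration
m_out = ~m_in

# Inject a 5% deep box transit on the in-transit points, plus noise.
y = np.ones_like(t)
y[m_in] -= 0.05
y += sigma * rng.standard_normal(len(t))

# Weighted in/out-of-transit means give the depth and its uncertainty.
y_in = np.sum(y[m_in] * ivar[m_in]) / np.sum(ivar[m_in])
y_out = np.sum(y[m_out] * ivar[m_out]) / np.sum(ivar[m_out])
depth = y_out - y_in
depth_err = np.sqrt(1.0 / np.sum(ivar[m_in]) + 1.0 / np.sum(ivar[m_out]))
print(depth, depth_err)   # depth recovered close to the injected 0.05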
# Licensed under a 3-clause BSD style license - see LICENSE.rst __all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"] import numpy as np from astropy import units from astropy import units as u from astropy.time import Time, TimeDelta from astropy.timeseries.periodograms.base import BasePeriodogram from astropy.timeseries.periodograms.lombscargle.core import has_units, strip_units from . import methods def validate_unit_consistency(reference_object, input_object): if has_units(reference_object): input_object = units.Quantity(input_object, unit=reference_object.unit) else: if has_units(input_object): input_object = units.Quantity(input_object, unit=units.one) input_object = input_object.value return input_object class BoxLeastSquares(BasePeriodogram): """Compute the box least squares periodogram. This method is a commonly used tool for discovering transiting exoplanets or eclipsing binaries in photometric time series datasets. This implementation is based on the "box least squares (BLS)" method described in [1]_ and [2]_. Parameters ---------- t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta` Sequence of observation times. y : array-like or `~astropy.units.Quantity` Sequence of observations associated with times ``t``. dy : float, array-like, or `~astropy.units.Quantity`, optional Error or sequence of observational errors associated with times ``t``. Examples -------- Generate noisy data with a transit: >>> rand = np.random.default_rng(42) >>> t = rand.uniform(0, 10, 500) >>> y = np.ones_like(t) >>> y[np.abs((t + 1.0)%2.0-1)<0.08] = 1.0 - 0.1 >>> y += 0.01 * rand.standard_normal(len(t)) Compute the transit periodogram on a heuristically determined period grid and find the period with maximum power: >>> model = BoxLeastSquares(t, y) >>> results = model.autopower(0.16) >>> results.period[np.argmax(results.power)] # doctest: +FLOAT_CMP 2.000412388152837 Compute the periodogram on a user-specified period grid: >>> periods = np.linspace(1.9, 2.1, 5) >>> results = model.power(periods, 0.16) >>> results.power # doctest: +FLOAT_CMP array([0.01723948, 0.0643028 , 0.1338783 , 0.09428816, 0.03577543]) If the inputs are AstroPy Quantities with units, the units will be validated and the outputs will also be Quantities with appropriate units: >>> from astropy import units as u >>> t = t * u.day >>> y = y * u.dimensionless_unscaled >>> model = BoxLeastSquares(t, y) >>> results = model.autopower(0.16 * u.day) >>> results.period.unit Unit("d") >>> results.power.unit Unit(dimensionless) References ---------- .. [1] Kovacs, Zucker, & Mazeh (2002), A&A, 391, 369 (arXiv:astro-ph/0206099) .. [2] Hartman & Bakos (2016), Astronomy & Computing, 17, 1 (arXiv:1605.06811) """ def __init__(self, t, y, dy=None): # If t is a TimeDelta, convert it to a quantity. The units we convert # to don't really matter since the user gets a Quantity back at the end # so can convert to any units they like. if isinstance(t, TimeDelta): t = t.to("day") # We want to expose self.t as being the times the user passed in, but # if the times are absolute, we need to convert them to relative times # internally, so we use self._trel and self._tstart for this. 
self.t = t if isinstance(self.t, (Time, TimeDelta)): self._tstart = self.t[0] trel = (self.t - self._tstart).to(u.day) else: self._tstart = None trel = self.t self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy) def autoperiod( self, duration, minimum_period=None, maximum_period=None, minimum_n_transit=3, frequency_factor=1.0, ): """Determine a suitable grid of periods. This method uses a set of heuristics to select a conservative period grid that is uniform in frequency. This grid might be too fine for some user's needs depending on the precision requirements or the sampling of the data. The grid can be made coarser by increasing ``frequency_factor``. Parameters ---------- duration : float, array-like, or `~astropy.units.Quantity` ['time'] The set of durations that will be considered. minimum_period, maximum_period : float or `~astropy.units.Quantity` ['time'], optional The minimum/maximum periods to search. If not provided, these will be computed as described in the notes below. minimum_n_transit : int, optional If ``maximum_period`` is not provided, this is used to compute the maximum period to search by asserting that any systems with at least ``minimum_n_transits`` will be within the range of searched periods. Note that this is not the same as requiring that ``minimum_n_transits`` be required for detection. The default value is ``3``. frequency_factor : float, optional A factor to control the frequency spacing as described in the notes below. The default value is ``1.0``. Returns ------- period : array-like or `~astropy.units.Quantity` ['time'] The set of periods computed using these heuristics with the same units as ``t``. Notes ----- The default minimum period is chosen to be twice the maximum duration because there won't be much sensitivity to periods shorter than that. The default maximum period is computed as .. code-block:: python maximum_period = (max(t) - min(t)) / minimum_n_transits ensuring that any systems with at least ``minimum_n_transits`` are within the range of searched periods. The frequency spacing is given by .. code-block:: python df = frequency_factor * min(duration) / (max(t) - min(t))**2 so the grid can be made finer by decreasing ``frequency_factor`` or coarser by increasing ``frequency_factor``. """ duration = self._validate_duration(duration) baseline = strip_units(self._trel.max() - self._trel.min()) min_duration = strip_units(np.min(duration)) # Estimate the required frequency spacing # Because of the sparsity of a transit, this must be much finer than # the frequency resolution for a sinusoidal fit. For a sinusoidal fit, # df would be 1/baseline (see LombScargle), but here this should be # scaled proportionally to the duration in units of baseline. df = frequency_factor * min_duration / baseline**2 # If a minimum period is not provided, choose one that is twice the # maximum duration because we won't be sensitive to any periods # shorter than that. if minimum_period is None: minimum_period = 2.0 * strip_units(np.max(duration)) else: minimum_period = validate_unit_consistency(self._trel, minimum_period) minimum_period = strip_units(minimum_period) # If no maximum period is provided, choose one by requiring that # all signals with at least minimum_n_transit should be detectable. 
if maximum_period is None: if minimum_n_transit <= 1: raise ValueError("minimum_n_transit must be greater than 1") maximum_period = baseline / (minimum_n_transit - 1) else: maximum_period = validate_unit_consistency(self._trel, maximum_period) maximum_period = strip_units(maximum_period) if maximum_period < minimum_period: minimum_period, maximum_period = maximum_period, minimum_period if minimum_period <= 0.0: raise ValueError("minimum_period must be positive") # Convert bounds to frequency minimum_frequency = 1.0 / strip_units(maximum_period) maximum_frequency = 1.0 / strip_units(minimum_period) # Compute the number of frequencies and the frequency grid nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df)) return 1.0 / (maximum_frequency - df * np.arange(nf)) * self._t_unit() def autopower( self, duration, objective=None, method=None, oversample=10, minimum_n_transit=3, minimum_period=None, maximum_period=None, frequency_factor=1.0, ): """Compute the periodogram at set of heuristically determined periods. This method calls :func:`BoxLeastSquares.autoperiod` to determine the period grid and then :func:`BoxLeastSquares.power` to compute the periodogram. See those methods for documentation of the arguments. """ period = self.autoperiod( duration, minimum_n_transit=minimum_n_transit, minimum_period=minimum_period, maximum_period=maximum_period, frequency_factor=frequency_factor, ) return self.power( period, duration, objective=objective, method=method, oversample=oversample ) def power(self, period, duration, objective=None, method=None, oversample=10): """Compute the periodogram for a set of periods. Parameters ---------- period : array-like or `~astropy.units.Quantity` ['time'] The periods where the power should be computed duration : float, array-like, or `~astropy.units.Quantity` ['time'] The set of durations to test objective : {'likelihood', 'snr'}, optional The scalar that should be optimized to find the best fit phase, duration, and depth. This can be either ``'likelihood'`` (default) to optimize the log-likelihood of the model, or ``'snr'`` to optimize the signal-to-noise with which the transit depth is measured. method : {'fast', 'slow'}, optional The computational method used to compute the periodogram. This is mainly included for the purposes of testing and most users will want to use the optimized ``'fast'`` method (default) that is implemented in Cython. ``'slow'`` is a brute-force method that is used to test the results of the ``'fast'`` method. oversample : int, optional The number of bins per duration that should be used. This sets the time resolution of the phase fit with larger values of ``oversample`` yielding a finer grid and higher computational cost. Returns ------- results : BoxLeastSquaresResults The periodogram results as a :class:`BoxLeastSquaresResults` object. Raises ------ ValueError If ``oversample`` is not an integer greater than 0 or if ``objective`` or ``method`` are not valid. 
""" period, duration = self._validate_period_and_duration(period, duration) # Check for absurdities in the ``oversample`` choice try: oversample = int(oversample) except TypeError: raise ValueError(f"oversample must be an int, got {oversample}") if oversample < 1: raise ValueError("oversample must be greater than or equal to 1") # Select the periodogram objective if objective is None: objective = "likelihood" allowed_objectives = ["snr", "likelihood"] if objective not in allowed_objectives: raise ValueError( f"Unrecognized method '{objective}'\n" f"allowed methods are: {allowed_objectives}" ) use_likelihood = objective == "likelihood" # Select the computational method if method is None: method = "fast" allowed_methods = ["fast", "slow"] if method not in allowed_methods: raise ValueError( f"Unrecognized method '{method}'\n" f"allowed methods are: {allowed_methods}" ) # Format and check the input arrays t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64) t_ref = np.min(t) y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64) if self.dy is None: ivar = np.ones_like(y) else: ivar = ( 1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2 ) # Make sure that the period and duration arrays are C-order period_fmt = np.ascontiguousarray(strip_units(period), dtype=np.float64) duration = np.ascontiguousarray(strip_units(duration), dtype=np.float64) # Select the correct implementation for the chosen method if method == "fast": bls = methods.bls_fast else: bls = methods.bls_slow # Run the implementation results = bls( t - t_ref, y - np.median(y), ivar, period_fmt, duration, oversample, use_likelihood, ) return self._format_results(t_ref, objective, period, results) def _as_relative_time(self, name, times): """ Convert the provided times (if absolute) to relative times using the current _tstart value. If the times provided are relative, they are returned without conversion (though we still do some checks). """ if isinstance(times, TimeDelta): times = times.to("day") if self._tstart is None: if isinstance(times, Time): raise TypeError( f"{name} was provided as an absolute time but " "the BoxLeastSquares class was initialized " "with relative times." ) else: if isinstance(times, Time): times = (times - self._tstart).to(u.day) else: raise TypeError( f"{name} was provided as a relative time but " "the BoxLeastSquares class was initialized " "with absolute times." ) times = validate_unit_consistency(self._trel, times) return times def _as_absolute_time_if_needed(self, name, times): """ Convert the provided times to absolute times using the current _tstart value, if needed. """ if self._tstart is not None: # Some time formats/scales can't represent dates/times too far # off from the present, so we need to mask values offset by # more than 100,000 yr (the periodogram algorithm can return # transit times of e.g 1e300 for some periods). reset = np.abs(times.to_value(u.year)) > 100000 times[reset] = 0 times = self._tstart + times times[reset] = np.nan return times def model(self, t_model, period, duration, transit_time): """Compute the transit model at the given period, duration, and phase. Parameters ---------- t_model : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` Times at which to compute the model. period : float or `~astropy.units.Quantity` ['time'] The period of the transits. duration : float or `~astropy.units.Quantity` ['time'] The duration of the transit. 
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time` The mid-transit time of a reference transit. Returns ------- y_model : array-like or `~astropy.units.Quantity` The model evaluated at the times ``t_model`` with units of ``y``. """ period, duration = self._validate_period_and_duration(period, duration) transit_time = self._as_relative_time("transit_time", transit_time) t_model = strip_units(self._as_relative_time("t_model", t_model)) period = float(strip_units(period)) duration = float(strip_units(duration)) transit_time = float(strip_units(transit_time)) t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64) y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64) if self.dy is None: ivar = np.ones_like(y) else: ivar = ( 1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2 ) # Compute the depth hp = 0.5 * period m_in = np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration m_out = ~m_in y_in = np.sum(y[m_in] * ivar[m_in]) / np.sum(ivar[m_in]) y_out = np.sum(y[m_out] * ivar[m_out]) / np.sum(ivar[m_out]) # Evaluate the model y_model = y_out + np.zeros_like(t_model) m_model = np.abs((t_model - transit_time + hp) % period - hp) < 0.5 * duration y_model[m_model] = y_in return y_model * self._y_unit() def compute_stats(self, period, duration, transit_time): """Compute descriptive statistics for a given transit model. These statistics are commonly used for vetting of transit candidates. Parameters ---------- period : float or `~astropy.units.Quantity` ['time'] The period of the transits. duration : float or `~astropy.units.Quantity` ['time'] The duration of the transit. transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time` The mid-transit time of a reference transit. Returns ------- stats : dict A dictionary containing several descriptive statistics: - ``depth``: The depth and uncertainty (as a tuple with two values) on the depth for the fiducial model. - ``depth_odd``: The depth and uncertainty on the depth for a model where the period is twice the fiducial period. - ``depth_even``: The depth and uncertainty on the depth for a model where the period is twice the fiducial period and the phase is offset by one orbital period. - ``depth_half``: The depth and uncertainty for a model with a period of half the fiducial period. - ``depth_phased``: The depth and uncertainty for a model with the fiducial period and the phase offset by half a period. - ``harmonic_amplitude``: The amplitude of the best fit sinusoidal model. - ``harmonic_delta_log_likelihood``: The difference in log likelihood between a sinusoidal model and the transit model. If ``harmonic_delta_log_likelihood`` is greater than zero, the sinusoidal model is preferred. - ``transit_times``: The mid-transit time for each transit in the baseline. - ``per_transit_count``: An array with a count of the number of data points in each unique transit included in the baseline. - ``per_transit_log_likelihood``: An array with the value of the log likelihood for each unique transit included in the baseline. 
""" period, duration = self._validate_period_and_duration(period, duration) transit_time = self._as_relative_time("transit_time", transit_time) period = float(strip_units(period)) duration = float(strip_units(duration)) transit_time = float(strip_units(transit_time)) t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64) y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64) if self.dy is None: ivar = np.ones_like(y) else: ivar = ( 1.0 / np.ascontiguousarray(strip_units(self.dy), dtype=np.float64) ** 2 ) # This a helper function that will compute the depth for several # different hypothesized transit models with different parameters def _compute_depth(m, y_out=None, var_out=None): if np.any(m) and (var_out is None or np.isfinite(var_out)): var_m = 1.0 / np.sum(ivar[m]) y_m = np.sum(y[m] * ivar[m]) * var_m if y_out is None: return y_m, var_m return y_out - y_m, np.sqrt(var_m + var_out) return 0.0, np.inf # Compute the depth of the fiducial model and the two models at twice # the period hp = 0.5 * period m_in = np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration m_out = ~m_in m_odd = np.abs((t - transit_time) % (2 * period) - period) < 0.5 * duration m_even = ( np.abs((t - transit_time + period) % (2 * period) - period) < 0.5 * duration ) y_out, var_out = _compute_depth(m_out) depth = _compute_depth(m_in, y_out, var_out) depth_odd = _compute_depth(m_odd, y_out, var_out) depth_even = _compute_depth(m_even, y_out, var_out) y_in = y_out - depth[0] # Compute the depth of the model at a phase of 0.5*period m_phase = np.abs((t - transit_time) % period - hp) < 0.5 * duration depth_phase = _compute_depth(m_phase, *_compute_depth((~m_phase) & m_out)) # Compute the depth of a model with a period of 0.5*period m_half = ( np.abs((t - transit_time + 0.25 * period) % (0.5 * period) - 0.25 * period) < 0.5 * duration ) depth_half = _compute_depth(m_half, *_compute_depth(~m_half)) # Compute the number of points in each transit transit_id = np.round((t[m_in] - transit_time) / period).astype(int) transit_times = ( period * np.arange(transit_id.min(), transit_id.max() + 1) + transit_time ) unique_ids, unique_counts = np.unique(transit_id, return_counts=True) unique_ids -= np.min(transit_id) transit_id -= np.min(transit_id) counts = np.zeros(np.max(transit_id) + 1, dtype=int) counts[unique_ids] = unique_counts # Compute the per-transit log likelihood ll = -0.5 * ivar[m_in] * ((y[m_in] - y_in) ** 2 - (y[m_in] - y_out) ** 2) lls = np.zeros(len(counts)) for i in unique_ids: lls[i] = np.sum(ll[transit_id == i]) full_ll = -0.5 * np.sum(ivar[m_in] * (y[m_in] - y_in) ** 2) full_ll -= 0.5 * np.sum(ivar[m_out] * (y[m_out] - y_out) ** 2) # Compute the log likelihood of a sine model A = np.vstack( ( np.sin(2 * np.pi * t / period), np.cos(2 * np.pi * t / period), np.ones_like(t), ) ).T w = np.linalg.solve(np.dot(A.T, A * ivar[:, None]), np.dot(A.T, y * ivar)) mod = np.dot(A, w) sin_ll = -0.5 * np.sum((y - mod) ** 2 * ivar) # Format the results y_unit = self._y_unit() ll_unit = 1 if self.dy is None: ll_unit = y_unit * y_unit return dict( transit_times=self._as_absolute_time_if_needed( "transit_times", transit_times * self._t_unit() ), per_transit_count=counts, per_transit_log_likelihood=lls * ll_unit, depth=(depth[0] * y_unit, depth[1] * y_unit), depth_phased=(depth_phase[0] * y_unit, depth_phase[1] * y_unit), depth_half=(depth_half[0] * y_unit, depth_half[1] * y_unit), depth_odd=(depth_odd[0] * y_unit, depth_odd[1] * y_unit), depth_even=(depth_even[0] * y_unit, depth_even[1] * y_unit), 
harmonic_amplitude=np.sqrt(np.sum(w[:2] ** 2)) * y_unit, harmonic_delta_log_likelihood=(sin_ll - full_ll) * ll_unit, ) def transit_mask(self, t, period, duration, transit_time): """Compute which data points are in transit for a given parameter set. Parameters ---------- t : array-like or `~astropy.units.Quantity` ['time'] Times where the mask should be evaluated. period : float or `~astropy.units.Quantity` ['time'] The period of the transits. duration : float or `~astropy.units.Quantity` ['time'] The duration of the transit. transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time` The mid-transit time of a reference transit. Returns ------- transit_mask : array-like A boolean array where ``True`` indicates and in transit point and ``False`` indicates and out-of-transit point. """ period, duration = self._validate_period_and_duration(period, duration) transit_time = self._as_relative_time("transit_time", transit_time) t = strip_units(self._as_relative_time("t", t)) period = float(strip_units(period)) duration = float(strip_units(duration)) transit_time = float(strip_units(transit_time)) hp = 0.5 * period return np.abs((t - transit_time + hp) % period - hp) < 0.5 * duration def _validate_inputs(self, t, y, dy): """Private method used to check the consistency of the inputs. Parameters ---------- t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta` Sequence of observation times. y : array-like or `~astropy.units.Quantity` Sequence of observations associated with times t. dy : float, array-like, or `~astropy.units.Quantity` Error or sequence of observational errors associated with times t. Returns ------- t, y, dy : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` The inputs with consistent shapes and units. Raises ------ ValueError If the dimensions are incompatible or if the units of dy cannot be converted to the units of y. """ # Validate shapes of inputs if dy is None: t, y = np.broadcast_arrays(t, y, subok=True) else: t, y, dy = np.broadcast_arrays(t, y, dy, subok=True) if t.ndim != 1: raise ValueError("Inputs (t, y, dy) must be 1-dimensional") # validate units of inputs if any is a Quantity if dy is not None: dy = validate_unit_consistency(y, dy) return t, y, dy def _validate_duration(self, duration): """Private method used to check a set of test durations. Parameters ---------- duration : float, array-like, or `~astropy.units.Quantity` The set of durations that will be considered. Returns ------- duration : array-like or `~astropy.units.Quantity` The input reformatted with the correct shape and units. Raises ------ ValueError If the units of duration cannot be converted to the units of t. """ duration = np.atleast_1d(np.abs(duration)) if duration.ndim != 1 or duration.size == 0: raise ValueError("duration must be 1-dimensional") return validate_unit_consistency(self._trel, duration) def _validate_period_and_duration(self, period, duration): """Private method used to check a set of periods and durations. Parameters ---------- period : float, array-like, or `~astropy.units.Quantity` ['time'] The set of test periods. duration : float, array-like, or `~astropy.units.Quantity` ['time'] The set of durations that will be considered. Returns ------- period, duration : array-like or `~astropy.units.Quantity` ['time'] The inputs reformatted with the correct shapes and units. Raises ------ ValueError If the units of period or duration cannot be converted to the units of t. 
""" duration = self._validate_duration(duration) period = np.atleast_1d(np.abs(period)) if period.ndim != 1 or period.size == 0: raise ValueError("period must be 1-dimensional") period = validate_unit_consistency(self._trel, period) if not np.min(period) > np.max(duration): raise ValueError( "The maximum transit duration must be shorter than the minimum period" ) return period, duration def _format_results(self, t_ref, objective, period, results): """A private method used to wrap and add units to the periodogram. Parameters ---------- t_ref : float The minimum time in the time series (a reference time). objective : str The name of the objective used in the optimization. period : array-like or `~astropy.units.Quantity` ['time'] The set of trial periods. results : tuple The output of one of the periodogram implementations. """ ( power, depth, depth_err, duration, transit_time, depth_snr, log_likelihood, ) = results transit_time += t_ref if has_units(self._trel): transit_time = units.Quantity(transit_time, unit=self._trel.unit) transit_time = self._as_absolute_time_if_needed( "transit_time", transit_time ) duration = units.Quantity(duration, unit=self._trel.unit) if has_units(self.y): depth = units.Quantity(depth, unit=self.y.unit) depth_err = units.Quantity(depth_err, unit=self.y.unit) depth_snr = units.Quantity(depth_snr, unit=units.one) if self.dy is None: if objective == "likelihood": power = units.Quantity(power, unit=self.y.unit**2) else: power = units.Quantity(power, unit=units.one) log_likelihood = units.Quantity(log_likelihood, unit=self.y.unit**2) else: power = units.Quantity(power, unit=units.one) log_likelihood = units.Quantity(log_likelihood, unit=units.one) return BoxLeastSquaresResults( objective, period, power, depth, depth_err, duration, transit_time, depth_snr, log_likelihood, ) def _t_unit(self): if has_units(self._trel): return self._trel.unit else: return 1 def _y_unit(self): if has_units(self.y): return self.y.unit else: return 1 class BoxLeastSquaresResults(dict): """The results of a BoxLeastSquares search. Attributes ---------- objective : str The scalar used to optimize to find the best fit phase, duration, and depth. See :func:`BoxLeastSquares.power` for more information. period : array-like or `~astropy.units.Quantity` ['time'] The set of test periods. power : array-like or `~astropy.units.Quantity` The periodogram evaluated at the periods in ``period``. If ``objective`` is: * ``'likelihood'``: the values of ``power`` are the log likelihood maximized over phase, depth, and duration, or * ``'snr'``: the values of ``power`` are the signal-to-noise with which the depth is measured maximized over phase, depth, and duration. depth : array-like or `~astropy.units.Quantity` The estimated depth of the maximum power model at each period. depth_err : array-like or `~astropy.units.Quantity` The 1-sigma uncertainty on ``depth``. duration : array-like or `~astropy.units.Quantity` ['time'] The maximum power duration at each period. transit_time : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` The maximum power phase of the transit in units of time. This indicates the mid-transit time and it will always be in the range (0, period). depth_snr : array-like or `~astropy.units.Quantity` The signal-to-noise with which the depth is measured at maximum power. log_likelihood : array-like or `~astropy.units.Quantity` The log likelihood of the maximum power model. 
""" def __init__(self, *args): super().__init__( zip( ( "objective", "period", "power", "depth", "depth_err", "duration", "transit_time", "depth_snr", "log_likelihood", ), args, ) ) def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ def __repr__(self): if self.keys(): m = max(map(len, list(self.keys()))) + 1 return "\n".join( [k.rjust(m) + ": " + repr(v) for k, v in sorted(self.items())] ) else: return self.__class__.__name__ + "()" def __dir__(self): return list(self.keys())
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Box Least Squares Algorithms. Box Least Squares ================= AstroPy-compatible reference implementation of the transit periorogram used to discover transiting exoplanets. """ __all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"] from .core import BoxLeastSquares, BoxLeastSquaresResults
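# Hedged note: these classes are also re-exported at the top level of the
# timeseries subpackage, so user code would normally import them as below
# rather than from this internal module path.

from astropy.timeseries import BoxLeastSquares, BoxLeastSquaresResults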
""" Utilities for computing periodogram statistics. This is an internal module; users should access this functionality via the ``false_alarm_probability`` and ``false_alarm_level`` methods of the ``astropy.timeseries.LombScargle`` API. """ from functools import wraps import numpy as np from astropy import units as u def _weighted_sum(val, dy): if dy is not None: return (val / dy**2).sum() else: return val.sum() def _weighted_mean(val, dy): if dy is None: return val.mean() else: return _weighted_sum(val, dy) / _weighted_sum(np.ones(val.shape), dy) def _weighted_var(val, dy): return _weighted_mean(val**2, dy) - _weighted_mean(val, dy) ** 2 def _gamma(N): from scipy.special import gammaln # Note: this is closely approximated by (1 - 0.75 / N) for large N return np.sqrt(2 / N) * np.exp(gammaln(N / 2) - gammaln((N - 1) / 2)) def vectorize_first_argument(func): @wraps(func) def new_func(x, *args, **kwargs): x = np.asarray(x) return np.array([func(xi, *args, **kwargs) for xi in x.flat]).reshape(x.shape) return new_func def pdf_single(z, N, normalization, dH=1, dK=3): """Probability density function for Lomb-Scargle periodogram. Compute the expected probability density function of the periodogram for the null hypothesis - i.e. data consisting of Gaussian noise. Parameters ---------- z : array-like The periodogram value. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : int, optional The number of parameters in the null hypothesis and the model. Returns ------- pdf : np.ndarray The expected probability density function. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ z = np.asarray(z) if dK - dH != 2: raise NotImplementedError("Degrees of freedom != 2") Nk = N - dK if normalization == "psd": return np.exp(-z) elif normalization == "standard": return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1) elif normalization == "model": return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1) elif normalization == "log": return 0.5 * Nk * np.exp(-0.5 * Nk * z) else: raise ValueError(f"normalization='{normalization}' is not recognized") def fap_single(z, N, normalization, dH=1, dK=3): """Single-frequency false alarm probability for the Lomb-Scargle periodogram. This is equal to 1 - cdf, where cdf is the cumulative distribution. The single-frequency false alarm probability should not be confused with the false alarm probability for the largest peak. Parameters ---------- z : array-like The periodogram value. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : int, optional The number of parameters in the null hypothesis and the model. Returns ------- false_alarm_probability : np.ndarray The single-frequency false alarm probability. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. 
MNRAS 385, 1279 (2008) """ z = np.asarray(z) if dK - dH != 2: raise NotImplementedError("Degrees of freedom != 2") Nk = N - dK if normalization == "psd": return np.exp(-z) elif normalization == "standard": return (1 - z) ** (0.5 * Nk) elif normalization == "model": return (1 + z) ** (-0.5 * Nk) elif normalization == "log": return np.exp(-0.5 * Nk * z) else: raise ValueError(f"normalization='{normalization}' is not recognized") def inv_fap_single(fap, N, normalization, dH=1, dK=3): """Single-frequency inverse false alarm probability. This function computes the periodogram value associated with the specified single-frequency false alarm probability. This should not be confused with the false alarm level of the largest peak. Parameters ---------- fap : array-like The false alarm probability. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : int, optional The number of parameters in the null hypothesis and the model. Returns ------- z : np.ndarray The periodogram power corresponding to the single-peak false alarm probability. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ fap = np.asarray(fap) if dK - dH != 2: raise NotImplementedError("Degrees of freedom != 2") Nk = N - dK # No warnings for fap = 0; rather, just let it give the right infinity. with np.errstate(divide="ignore"): if normalization == "psd": return -np.log(fap) elif normalization == "standard": return 1 - fap ** (2 / Nk) elif normalization == "model": return -1 + fap ** (-2 / Nk) elif normalization == "log": return -2 / Nk * np.log(fap) else: raise ValueError(f"normalization='{normalization}' is not recognized") def cdf_single(z, N, normalization, dH=1, dK=3): """Cumulative distribution for the Lomb-Scargle periodogram. Compute the expected cumulative distribution of the periodogram for the null hypothesis - i.e. data consisting of Gaussian noise. Parameters ---------- z : array-like The periodogram value. N : int The number of data points from which the periodogram was computed. normalization : {'standard', 'model', 'log', 'psd'} The periodogram normalization. dH, dK : int, optional The number of parameters in the null hypothesis and the model. Returns ------- cdf : np.ndarray The expected cumulative distribution function. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. All expressions used here are adapted from Table 1 of Baluev 2008 [1]_. References ---------- .. [1] Baluev, R.V. 
MNRAS 385, 1279 (2008) """ return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK) def tau_davies(Z, fmax, t, y, dy, normalization="standard", dH=1, dK=3): """tau factor for estimating Davies bound (Baluev 2008, Table 1).""" N = len(t) NH = N - dH # DOF for null hypothesis NK = N - dK # DOF for periodic hypothesis Dt = _weighted_var(t, dy) Teff = np.sqrt(4 * np.pi * Dt) # Effective baseline W = fmax * Teff Z = np.asarray(Z) if normalization == "psd": # 'psd' normalization is same as Baluev's z return W * np.exp(-Z) * np.sqrt(Z) elif normalization == "standard": # 'standard' normalization is Z = 2/NH * z_1 return _gamma(NH) * W * (1 - Z) ** (0.5 * (NK - 1)) * np.sqrt(0.5 * NH * Z) elif normalization == "model": # 'model' normalization is Z = 2/NK * z_2 return _gamma(NK) * W * (1 + Z) ** (-0.5 * NK) * np.sqrt(0.5 * NK * Z) elif normalization == "log": # 'log' normalization is Z = 2/NK * z_3 return ( _gamma(NK) * W * np.exp(-0.5 * Z * (NK - 0.5)) * np.sqrt(NK * np.sinh(0.5 * Z)) ) else: raise NotImplementedError(f"normalization={normalization}") def fap_naive(Z, fmax, t, y, dy, normalization="standard"): """False Alarm Probability based on estimated number of indep frequencies.""" N = len(t) T = max(t) - min(t) N_eff = fmax * T fap_s = fap_single(Z, N, normalization=normalization) # result is 1 - (1 - fap_s) ** N_eff # this is much more precise for small Z / large N # Ignore divide by zero no np.log1p - fine to let it return -inf. with np.errstate(divide="ignore"): return -np.expm1(N_eff * np.log1p(-fap_s)) def inv_fap_naive(fap, fmax, t, y, dy, normalization="standard"): """Inverse FAP based on estimated number of indep frequencies.""" fap = np.asarray(fap) N = len(t) T = max(t) - min(t) N_eff = fmax * T # fap_s = 1 - (1 - fap) ** (1 / N_eff) # Ignore divide by zero no np.log - fine to let it return -inf. with np.errstate(divide="ignore"): fap_s = -np.expm1(np.log(1 - fap) / N_eff) return inv_fap_single(fap_s, N, normalization) def fap_davies(Z, fmax, t, y, dy, normalization="standard"): """Davies upper-bound to the false alarm probability. (Eqn 5 of Baluev 2008) """ N = len(t) fap_s = fap_single(Z, N, normalization=normalization) tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization) return fap_s + tau @vectorize_first_argument def inv_fap_davies(p, fmax, t, y, dy, normalization="standard"): """Inverse of the davies upper-bound.""" from scipy import optimize args = (fmax, t, y, dy, normalization) z0 = inv_fap_naive(p, *args) func = lambda z, *args: fap_davies(z, *args) - p res = optimize.root(func, z0, args=args, method="lm") if not res.success: raise ValueError(f"inv_fap_baluev did not converge for p={p}") return res.x def fap_baluev(Z, fmax, t, y, dy, normalization="standard"): """Alias-free approximation to false alarm probability. 
(Eqn 6 of Baluev 2008) """ fap_s = fap_single(Z, len(t), normalization) tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization) # result is 1 - (1 - fap_s) * np.exp(-tau) # this is much more precise for small numbers return -np.expm1(-tau) + fap_s * np.exp(-tau) @vectorize_first_argument def inv_fap_baluev(p, fmax, t, y, dy, normalization="standard"): """Inverse of the Baluev alias-free approximation.""" from scipy import optimize args = (fmax, t, y, dy, normalization) z0 = inv_fap_naive(p, *args) func = lambda z, *args: fap_baluev(z, *args) - p res = optimize.root(func, z0, args=args, method="lm") if not res.success: raise ValueError(f"inv_fap_baluev did not converge for p={p}") return res.x def _bootstrap_max(t, y, dy, fmax, normalization, random_seed, n_bootstrap=1000): """Generate a sequence of bootstrap estimates of the max.""" from .core import LombScargle rng = np.random.default_rng(random_seed) power_max = [] for _ in range(n_bootstrap): s = rng.integers(0, len(y), len(y)) # sample with replacement ls_boot = LombScargle( t, y[s], dy if dy is None else dy[s], normalization=normalization ) freq, power = ls_boot.autopower(maximum_frequency=fmax) power_max.append(power.max()) power_max = u.Quantity(power_max) power_max.sort() return power_max def fap_bootstrap( Z, fmax, t, y, dy, normalization="standard", n_bootstraps=1000, random_seed=None ): """Bootstrap estimate of the false alarm probability.""" pmax = _bootstrap_max(t, y, dy, fmax, normalization, random_seed, n_bootstraps) return 1 - np.searchsorted(pmax, Z) / len(pmax) def inv_fap_bootstrap( fap, fmax, t, y, dy, normalization="standard", n_bootstraps=1000, random_seed=None ): """Bootstrap estimate of the inverse false alarm probability.""" fap = np.asarray(fap) pmax = _bootstrap_max(t, y, dy, fmax, normalization, random_seed, n_bootstraps) return pmax[np.clip(np.floor((1 - fap) * len(pmax)).astype(int), 0, len(pmax) - 1)] METHODS = { "single": fap_single, "naive": fap_naive, "davies": fap_davies, "baluev": fap_baluev, "bootstrap": fap_bootstrap, } def false_alarm_probability( Z, fmax, t, y, dy, normalization="standard", method="baluev", method_kwds=None ): """Compute the approximate false alarm probability for periodogram peaks Z. This gives an estimate of the false alarm probability for the largest value in a periodogram, based on the null hypothesis of non-varying data with Gaussian noise. The true probability cannot be computed analytically, so each method available here is an approximation to the true value. Parameters ---------- Z : array-like The periodogram value. fmax : float The maximum frequency of the periodogram. t, y, dy : array-like The data times, values, and errors. normalization : {'standard', 'model', 'log', 'psd'}, optional The periodogram normalization. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use. method_kwds : dict, optional Additional method-specific keywords. Returns ------- false_alarm_probability : np.ndarray The false alarm probability. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- false_alarm_level : compute the periodogram level for a particular fap References ---------- .. [1] Baluev, R.V. 
MNRAS 385, 1279 (2008) """ if method == "single": return fap_single(Z, len(t), normalization) elif method not in METHODS: raise ValueError(f"Unrecognized method: {method}") method = METHODS[method] method_kwds = method_kwds or {} return method(Z, fmax, t, y, dy, normalization, **method_kwds) INV_METHODS = { "single": inv_fap_single, "naive": inv_fap_naive, "davies": inv_fap_davies, "baluev": inv_fap_baluev, "bootstrap": inv_fap_bootstrap, } def false_alarm_level( p, fmax, t, y, dy, normalization, method="baluev", method_kwds=None ): """Compute the approximate periodogram level given a false alarm probability. This gives an estimate of the periodogram level corresponding to a specified false alarm probability for the largest peak, assuming a null hypothesis of non-varying data with Gaussian noise. The true level cannot be computed analytically, so each method available here is an approximation to the true value. Parameters ---------- p : array-like The false alarm probability (0 < p < 1). fmax : float The maximum frequency of the periodogram. t, y, dy : arrays The data times, values, and errors. normalization : {'standard', 'model', 'log', 'psd'}, optional The periodogram normalization. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use. method_kwds : dict, optional Additional method-specific keywords. Returns ------- z : np.ndarray The periodogram level. Notes ----- For normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- false_alarm_probability : compute the fap for a given periodogram level References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if method == "single": return inv_fap_single(p, len(t), normalization) elif method not in INV_METHODS: raise ValueError(f"Unrecognized method: {method}") method = INV_METHODS[method] method_kwds = method_kwds or {} return method(p, fmax, t, y, dy, normalization, **method_kwds)
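# Hedged numerical sanity check for the helpers above: with the purely
# analytic 'single' method, ``false_alarm_level`` should invert
# ``false_alarm_probability``.  The sample data and power value are assumed
# for illustration; for this particular method the t/y/dy arrays only set
# the number of data points.

import numpy as np

rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0, 100, 50))
y = rng.standard_normal(50)
dy = np.full(50, 1.0)
fmax = 5.0

z = 0.3
fap = false_alarm_probability(z, fmax, t, y, dy,
                              normalization="standard", method="single")
z_back = false_alarm_level(fap, fmax, t, y, dy,
                           normalization="standard", method="single")
np.testing.assert_allclose(z_back, z)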
"""Main Lomb-Scargle Implementation.""" import numpy as np from astropy import units from astropy import units as u from astropy.time import Time, TimeDelta from astropy.timeseries.periodograms.base import BasePeriodogram from . import _statistics from .implementations import available_methods, lombscargle from .implementations.mle import design_matrix, periodic_fit def has_units(obj): return hasattr(obj, "unit") def get_unit(obj): return getattr(obj, "unit", 1) def strip_units(*arrs): strip = lambda a: None if a is None else np.asarray(a) if len(arrs) == 1: return strip(arrs[0]) else: return map(strip, arrs) class LombScargle(BasePeriodogram): """Compute the Lomb-Scargle Periodogram. This implementations here are based on code presented in [1]_ and [2]_; if you use this functionality in an academic application, citation of those works would be appreciated. Parameters ---------- t : array-like or `~astropy.units.Quantity` ['time'] sequence of observation times y : array-like or `~astropy.units.Quantity` sequence of observations associated with times t dy : float, array-like, or `~astropy.units.Quantity`, optional error or sequence of observational errors associated with times t fit_mean : bool, optional if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool, optional if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if fit_mean = False nterms : int, optional number of terms to use in the Fourier fit normalization : {'standard', 'model', 'log', 'psd'}, optional Normalization to use for the periodogram. Examples -------- Generate noisy periodic data: >>> rand = np.random.default_rng(42) >>> t = 100 * rand.random(100) >>> y = np.sin(2 * np.pi * t) + rand.standard_normal(100) Compute the Lomb-Scargle periodogram on an automatically-determined frequency grid & find the frequency of max power: >>> frequency, power = LombScargle(t, y).autopower() >>> frequency[np.argmax(power)] # doctest: +FLOAT_CMP 1.0007641728995051 Compute the Lomb-Scargle periodogram at a user-specified frequency grid: >>> freq = np.arange(0.8, 1.3, 0.1) >>> LombScargle(t, y).power(freq) # doctest: +FLOAT_CMP array([0.0792948 , 0.01778874, 0.25328167, 0.01064157, 0.01471387]) If the inputs are astropy Quantities with units, the units will be validated and the outputs will also be Quantities with appropriate units: >>> from astropy import units as u >>> t = t * u.s >>> y = y * u.mag >>> frequency, power = LombScargle(t, y).autopower() >>> frequency.unit Unit("1 / s") >>> power.unit Unit(dimensionless) Note here that the Lomb-Scargle power is always a unitless quantity, because it is related to the :math:`\\chi^2` of the best-fit periodic model at each frequency. References ---------- .. [1] Vanderplas, J., Connolly, A. Ivezic, Z. & Gray, A. *Introduction to astroML: Machine learning for astrophysics*. Proceedings of the Conference on Intelligent Data Understanding (2012) .. [2] VanderPlas, J. & Ivezic, Z. *Periodograms for Multiband Astronomical Time Series*. ApJ 812.1:18 (2015) """ available_methods = available_methods() def __init__( self, t, y, dy=None, fit_mean=True, center_data=True, nterms=1, normalization="standard", ): # If t is a TimeDelta, convert it to a quantity. The units we convert # to don't really matter since the user gets a Quantity back at the end # so can convert to any units they like. 
if isinstance(t, TimeDelta): t = t.to("day") # We want to expose self.t as being the times the user passed in, but # if the times are absolute, we need to convert them to relative times # internally, so we use self._trel and self._tstart for this. self.t = t if isinstance(self.t, Time): self._tstart = self.t[0] trel = (self.t - self._tstart).to(u.day) else: self._tstart = None trel = self.t self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy) self.fit_mean = fit_mean self.center_data = center_data self.nterms = nterms self.normalization = normalization def _validate_inputs(self, t, y, dy): # Validate shapes of inputs if dy is None: t, y = np.broadcast_arrays(t, y, subok=True) else: t, y, dy = np.broadcast_arrays(t, y, dy, subok=True) if t.ndim != 1: raise ValueError("Inputs (t, y, dy) must be 1-dimensional") # validate units of inputs if any is a Quantity if any(has_units(arr) for arr in (t, y, dy)): t, y = map(units.Quantity, (t, y)) if dy is not None: dy = units.Quantity(dy) try: dy = units.Quantity(dy, unit=y.unit) except units.UnitConversionError: raise ValueError("Units of dy not equivalent to units of y") return t, y, dy def _validate_frequency(self, frequency): frequency = np.asanyarray(frequency) if has_units(self._trel): frequency = units.Quantity(frequency) try: frequency = units.Quantity(frequency, unit=1.0 / self._trel.unit) except units.UnitConversionError: raise ValueError("Units of frequency not equivalent to units of 1/t") else: if has_units(frequency): raise ValueError("frequency have units while 1/t doesn't.") return frequency def _validate_t(self, t): t = np.asanyarray(t) if has_units(self._trel): t = units.Quantity(t) try: t = units.Quantity(t, unit=self._trel.unit) except units.UnitConversionError: raise ValueError("Units of t not equivalent to units of input self.t") return t def _power_unit(self, norm): if has_units(self.y): if self.dy is None and norm == "psd": return self.y.unit**2 else: return units.dimensionless_unscaled else: return 1 def autofrequency( self, samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, return_freq_limits=False, ): """Determine a suitable frequency grid for data. Note that this assumes the peak width is driven by the observational baseline, which is generally a good assumption when the baseline is much larger than the oscillation period. If you are searching for periods longer than the baseline of your observations, this may not perform well. Even with a large baseline, be aware that the maximum frequency returned is based on the concept of "average Nyquist frequency", which may not be useful for irregularly-sampled data. The maximum frequency can be adjusted via the nyquist_factor argument, or through the maximum_frequency argument. Parameters ---------- samples_per_peak : float, optional The approximate number of desired samples across the typical peak nyquist_factor : float, optional The multiple of the average nyquist frequency used to choose the maximum frequency if maximum_frequency is not provided. minimum_frequency : float, optional If specified, then use this minimum frequency rather than one chosen based on the size of the baseline. maximum_frequency : float, optional If specified, then use this maximum frequency rather than one chosen based on the average nyquist frequency. return_freq_limits : bool, optional if True, return only the frequency limits rather than the full frequency grid. 
Returns ------- frequency : ndarray or `~astropy.units.Quantity` ['frequency'] The heuristically-determined optimal frequency bin """ baseline = self._trel.max() - self._trel.min() n_samples = self._trel.size df = 1.0 / baseline / samples_per_peak if minimum_frequency is None: minimum_frequency = 0.5 * df if maximum_frequency is None: avg_nyquist = 0.5 * n_samples / baseline maximum_frequency = nyquist_factor * avg_nyquist Nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df)) if return_freq_limits: return minimum_frequency, minimum_frequency + df * (Nf - 1) else: return minimum_frequency + df * np.arange(Nf) def autopower( self, method="auto", method_kwds=None, normalization=None, samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, ): """Compute Lomb-Scargle power at automatically-determined frequencies. Parameters ---------- method : str, optional specify the lomb scargle implementation to use. Options are: - 'auto': choose the best method based on the input - 'fast': use the O[N log N] fast method. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'slow': use the O[N^2] pure-python implementation - 'cython': use the O[N^2] cython implementation. This is slightly faster than method='slow', but much more memory efficient. - 'chi2': use the O[N^2] chi2/linear-fitting implementation - 'fastchi2': use the O[N log N] chi2 implementation. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2] implementation written in C. Note that this does not support heteroskedastic errors. method_kwds : dict, optional additional keywords to pass to the lomb-scargle method normalization : {'standard', 'model', 'log', 'psd'}, optional If specified, override the normalization specified at instantiation. samples_per_peak : float, optional The approximate number of desired samples across the typical peak nyquist_factor : float, optional The multiple of the average nyquist frequency used to choose the maximum frequency if maximum_frequency is not provided. minimum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional If specified, then use this minimum frequency rather than one chosen based on the size of the baseline. Should be `~astropy.units.Quantity` if inputs to LombScargle are `~astropy.units.Quantity`. maximum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional If specified, then use this maximum frequency rather than one chosen based on the average nyquist frequency. Should be `~astropy.units.Quantity` if inputs to LombScargle are `~astropy.units.Quantity`. Returns ------- frequency, power : ndarray The frequency and Lomb-Scargle power """ frequency = self.autofrequency( samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency, ) power = self.power( frequency, normalization=normalization, method=method, method_kwds=method_kwds, assume_regular_frequency=True, ) return frequency, power def power( self, frequency, normalization=None, method="auto", assume_regular_frequency=False, method_kwds=None, ): """Compute the Lomb-Scargle power at the given frequencies. 
Parameters ---------- frequency : array-like or `~astropy.units.Quantity` ['frequency'] frequencies (not angular frequencies) at which to evaluate the periodogram. Note that in order to use method='fast', frequencies must be regularly-spaced. method : str, optional specify the lomb scargle implementation to use. Options are: - 'auto': choose the best method based on the input - 'fast': use the O[N log N] fast method. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'slow': use the O[N^2] pure-python implementation - 'cython': use the O[N^2] cython implementation. This is slightly faster than method='slow', but much more memory efficient. - 'chi2': use the O[N^2] chi2/linear-fitting implementation - 'fastchi2': use the O[N log N] chi2 implementation. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2] implementation written in C. Note that this does not support heteroskedastic errors. assume_regular_frequency : bool, optional if True, assume that the input frequency is of the form freq = f0 + df * np.arange(N). Only referenced if method is 'auto' or 'fast'. normalization : {'standard', 'model', 'log', 'psd'}, optional If specified, override the normalization specified at instantiation. method_kwds : dict, optional additional keywords to pass to the lomb-scargle method Returns ------- power : ndarray The Lomb-Scargle power at the specified frequency """ if normalization is None: normalization = self.normalization frequency = self._validate_frequency(frequency) power = lombscargle( *strip_units(self._trel, self.y, self.dy), frequency=strip_units(frequency), center_data=self.center_data, fit_mean=self.fit_mean, nterms=self.nterms, normalization=normalization, method=method, method_kwds=method_kwds, assume_regular_frequency=assume_regular_frequency, ) return power * self._power_unit(normalization) def _as_relative_time(self, name, times): """ Convert the provided times (if absolute) to relative times using the current _tstart value. If the times provided are relative, they are returned without conversion (though we still do some checks). """ if isinstance(times, TimeDelta): times = times.to("day") if self._tstart is None: if isinstance(times, Time): raise TypeError( f"{name} was provided as an absolute time but " "the LombScargle class was initialized " "with relative times." ) else: if isinstance(times, Time): times = (times - self._tstart).to(u.day) else: raise TypeError( f"{name} was provided as a relative time but " "the LombScargle class was initialized " "with absolute times." ) return times def model(self, t, frequency): """Compute the Lomb-Scargle model at the given frequency. The model at a particular frequency is a linear model: model = offset + dot(design_matrix, model_parameters) Parameters ---------- t : array-like or `~astropy.units.Quantity` ['time'] Times (length ``n_samples``) at which to compute the model. frequency : float the frequency for the model Returns ------- y : np.ndarray The model fit corresponding to the input times (will have length ``n_samples``). 
See Also -------- design_matrix offset model_parameters """ frequency = self._validate_frequency(frequency) t = self._validate_t(self._as_relative_time("t", t)) y_fit = periodic_fit( *strip_units(self._trel, self.y, self.dy), frequency=strip_units(frequency), t_fit=strip_units(t), center_data=self.center_data, fit_mean=self.fit_mean, nterms=self.nterms, ) return y_fit * get_unit(self.y) def offset(self): """Return the offset of the model. The offset of the model is the (weighted) mean of the y values. Note that if self.center_data is False, the offset is 0 by definition. Returns ------- offset : scalar See Also -------- design_matrix model model_parameters """ y, dy = strip_units(self.y, self.dy) if dy is None: dy = 1 dy = np.broadcast_to(dy, y.shape) if self.center_data: w = dy**-2.0 y_mean = np.dot(y, w) / w.sum() else: y_mean = 0 return y_mean * get_unit(self.y) def model_parameters(self, frequency, units=True): r"""Compute the best-fit model parameters at the given frequency. The model described by these parameters is: .. math:: y(t; f, \vec{\theta}) = \theta_0 + \sum_{n=1}^{\tt nterms} [\theta_{2n-1}\sin(2\pi n f t) + \theta_{2n}\cos(2\pi n f t)] where :math:`\vec{\theta}` is the array of parameters returned by this function. Parameters ---------- frequency : float the frequency for the model units : bool If True (default), return design matrix with data units. Returns ------- theta : np.ndarray (n_parameters,) The best-fit model parameters at the given frequency. See Also -------- design_matrix model offset """ frequency = self._validate_frequency(frequency) t, y, dy = strip_units(self._trel, self.y, self.dy) if self.center_data: y = y - strip_units(self.offset()) dy = np.ones_like(y) if dy is None else np.asarray(dy) X = self.design_matrix(frequency) parameters = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y / dy)) if units: parameters = get_unit(self.y) * parameters return parameters def design_matrix(self, frequency, t=None): """Compute the design matrix for a given frequency. Parameters ---------- frequency : float the frequency for the model t : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time` (optional) Times (length ``n_samples``) at which to compute the model. If not specified, then the times and uncertainties of the input data are used. Returns ------- X : array The design matrix for the model at the given frequency. This should have a shape of (``len(t)``, ``n_parameters``). See Also -------- model model_parameters offset """ if t is None: t, dy = strip_units(self._trel, self.dy) else: t, dy = strip_units(self._validate_t(self._as_relative_time("t", t)), None) return design_matrix(t, frequency, dy, nterms=self.nterms, bias=self.fit_mean) def distribution(self, power, cumulative=False): """Expected periodogram distribution under the null hypothesis. This computes the expected probability distribution or cumulative probability distribution of periodogram power, under the null hypothesis of a non-varying signal with Gaussian noise. Note that this is not the same as the expected distribution of peak values; for that see the ``false_alarm_probability()`` method. Parameters ---------- power : array-like The periodogram power at which to compute the distribution. cumulative : bool, optional If True, then return the cumulative distribution. See Also -------- false_alarm_probability false_alarm_level Returns ------- dist : np.ndarray The probability density or cumulative probability associated with the provided powers. 
""" dH = 1 if self.fit_mean or self.center_data else 0 dK = dH + 2 * self.nterms dist = _statistics.cdf_single if cumulative else _statistics.pdf_single return dist(power, len(self._trel), self.normalization, dH=dH, dK=dK) def false_alarm_probability( self, power, method="baluev", samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, method_kwds=None, ): """False alarm probability of periodogram maxima under the null hypothesis. This gives an estimate of the false alarm probability given the height of the largest peak in the periodogram, based on the null hypothesis of non-varying data with Gaussian noise. Parameters ---------- power : array-like The periodogram value. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use. maximum_frequency : float The maximum frequency of the periodogram. method_kwds : dict, optional Additional method-specific keywords. Returns ------- false_alarm_probability : np.ndarray The false alarm probability Notes ----- The true probability distribution for the largest peak cannot be determined analytically, so each method here provides an approximation to the value. The available methods are: - "baluev" (default): the upper-limit to the alias-free probability, using the approach of Baluev (2008) [1]_. - "davies" : the Davies upper bound from Baluev (2008) [1]_. - "naive" : the approximate probability based on an estimated effective number of independent frequencies. - "bootstrap" : the approximate probability based on bootstrap resamplings of the input data. Note also that for normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- distribution false_alarm_level References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if self.nterms != 1: raise NotImplementedError( "false alarm probability is not implemented for multiterm periodograms." ) if not (self.fit_mean or self.center_data): raise NotImplementedError( "false alarm probability is implemented " "only for periodograms of centered data." ) fmin, fmax = self.autofrequency( samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency, return_freq_limits=True, ) return _statistics.false_alarm_probability( power, fmax=fmax, t=self._trel, y=self.y, dy=self.dy, normalization=self.normalization, method=method, method_kwds=method_kwds, ) def false_alarm_level( self, false_alarm_probability, method="baluev", samples_per_peak=5, nyquist_factor=5, minimum_frequency=None, maximum_frequency=None, method_kwds=None, ): """Level of maximum at a given false alarm probability. This gives an estimate of the periodogram level corresponding to a specified false alarm probability for the largest peak, assuming a null hypothesis of non-varying data with Gaussian noise. Parameters ---------- false_alarm_probability : array-like The false alarm probability (0 < fap < 1). maximum_frequency : float The maximum frequency of the periodogram. method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional The approximation method to use; default='baluev'. method_kwds : dict, optional Additional method-specific keywords. Returns ------- power : np.ndarray The periodogram peak height corresponding to the specified false alarm probability. Notes ----- The true probability distribution for the largest peak cannot be determined analytically, so each method here provides an approximation to the value. 
The available methods are: - "baluev" (default): the upper-limit to the alias-free probability, using the approach of Baluev (2008) [1]_. - "davies" : the Davies upper bound from Baluev (2008) [1]_. - "naive" : the approximate probability based on an estimated effective number of independent frequencies. - "bootstrap" : the approximate probability based on bootstrap resamplings of the input data. The number of samples can be set with the method-specific keyword "n_bootstraps" (default=1000). Note also that for normalization='psd', the distribution can only be computed for periodograms constructed with errors specified. See Also -------- distribution false_alarm_probability References ---------- .. [1] Baluev, R.V. MNRAS 385, 1279 (2008) """ if self.nterms != 1: raise NotImplementedError( "false alarm probability is not implemented for multiterm periodograms." ) if not (self.fit_mean or self.center_data): raise NotImplementedError( "false alarm probability is implemented " "only for periodograms of centered data." ) fmin, fmax = self.autofrequency( samples_per_peak=samples_per_peak, nyquist_factor=nyquist_factor, minimum_frequency=minimum_frequency, maximum_frequency=maximum_frequency, return_freq_limits=True, ) return _statistics.false_alarm_level( false_alarm_probability, fmax=fmax, t=self._trel, y=self.y, dy=self.dy, normalization=self.normalization, method=method, method_kwds=method_kwds, )
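# Hedged usage sketch for the class methods documented above (autofrequency,
# autopower, power, false_alarm_probability).  It assumes the public entry point
# astropy.timeseries.LombScargle and uses synthetic data purely for illustration.
import numpy as np
import astropy.units as u
from astropy.timeseries import LombScargle

rng = np.random.default_rng(42)
t = 100 * rng.random(200) * u.day                        # irregularly sampled times
y = 1.5 * np.sin(2 * np.pi * 0.37 * t.value) + rng.normal(0, 0.3, t.size)

ls = LombScargle(t, y)
frequency, power = ls.autopower(samples_per_peak=10)     # heuristic frequency grid
best_frequency = frequency[np.argmax(power)]             # expected near 0.37 / day
fap = ls.false_alarm_probability(power.max(), method="baluev")
print(best_frequency, fap)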
import numpy as np from .mle import design_matrix def lombscargle_chi2( t, y, dy, frequency, normalization="standard", fit_mean=True, center_data=True, nterms=1, ): """Lomb-Scargle Periodogram. This implements a chi-squared-based periodogram, which is relatively slow but useful for validating the faster algorithms in the package. Parameters ---------- t, y, dy : array-like times, values, and errors of the data points. These should be broadcastable to the same shape. None should be `~astropy.units.Quantity``. frequency : array-like frequencies (not angular frequencies) at which to calculate periodogram normalization : str, optional Normalization to use for the periodogram. Options are 'standard', 'model', 'log', or 'psd'. fit_mean : bool, optional if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool, optional if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if ``fit_mean = False`` nterms : int, optional Number of Fourier terms in the fit Returns ------- power : array-like Lomb-Scargle power associated with each frequency. Units of the result depend on the normalization. References ---------- .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) .. [2] W. Press et al, Numerical Recipes in C (2002) .. [3] Scargle, J.D. 1982, ApJ 263:835-853 """ if dy is None: dy = 1 t, y, dy = np.broadcast_arrays(t, y, dy) frequency = np.asarray(frequency) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") if frequency.ndim != 1: raise ValueError("frequency should be one-dimensional") w = dy**-2.0 w /= w.sum() # if fit_mean is true, centering the data now simplifies the math below. if center_data or fit_mean: yw = (y - np.dot(w, y)) / dy else: yw = y / dy chi2_ref = np.dot(yw, yw) # compute the unnormalized model chi2 at each frequency def compute_power(f): X = design_matrix(t, f, dy=dy, bias=fit_mean, nterms=nterms) XTX = np.dot(X.T, X) XTy = np.dot(X.T, yw) return np.dot(XTy.T, np.linalg.solve(XTX, XTy)) p = np.array([compute_power(f) for f in frequency]) if normalization == "psd": p *= 0.5 elif normalization == "model": p /= chi2_ref - p elif normalization == "log": p = -np.log(1 - p / chi2_ref) elif normalization == "standard": p /= chi2_ref else: raise ValueError(f"normalization='{normalization}' not recognized") return p
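# Minimal usage sketch for lombscargle_chi2 as defined above, with synthetic data;
# it assumes the snippet runs alongside the definition (the function is normally
# reached through the higher-level ``lombscargle`` dispatcher instead).
import numpy as np

rng = np.random.default_rng(0)
t = np.sort(30 * rng.random(60))
y = 2.0 + np.sin(2 * np.pi * 0.8 * t) + 0.1 * rng.normal(size=t.size)
dy = 0.1 * np.ones_like(y)
freq = np.linspace(0.1, 1.5, 300)

power = lombscargle_chi2(t, y, dy, freq, normalization="standard")
print(freq[np.argmax(power)])   # expected to land near the injected 0.8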
""" Main Lomb-Scargle Implementation. The ``lombscargle`` function here is essentially a sophisticated switch statement for the various implementations available in this submodule """ __all__ = ["lombscargle", "available_methods"] import numpy as np from .chi2_impl import lombscargle_chi2 from .cython_impl import lombscargle_cython from .fast_impl import lombscargle_fast from .fastchi2_impl import lombscargle_fastchi2 from .scipy_impl import lombscargle_scipy from .slow_impl import lombscargle_slow METHODS = { "slow": lombscargle_slow, "fast": lombscargle_fast, "chi2": lombscargle_chi2, "scipy": lombscargle_scipy, "fastchi2": lombscargle_fastchi2, "cython": lombscargle_cython, } def available_methods(): methods = ["auto", "slow", "chi2", "cython", "fast", "fastchi2"] # Scipy required for scipy algorithm (obviously) try: import scipy # noqa: F401 except ImportError: pass else: methods.append("scipy") return methods def _is_regular(frequency): frequency = np.asarray(frequency) if frequency.ndim != 1: return False elif len(frequency) == 1: return True else: diff = np.diff(frequency) return np.allclose(diff[0], diff) def _get_frequency_grid(frequency, assume_regular_frequency=False): """Utility to get grid parameters from a frequency array. Parameters ---------- frequency : array-like or `~astropy.units.Quantity` ['frequency'] input frequency grid assume_regular_frequency : bool (default = False) if True, then do not check whether frequency is a regular grid Returns ------- f0, df, N : scalar Parameters such that all(frequency == f0 + df * np.arange(N)) """ frequency = np.asarray(frequency) if frequency.ndim != 1: raise ValueError("frequency grid must be 1 dimensional") elif len(frequency) == 1: return frequency[0], frequency[0], 1 elif not (assume_regular_frequency or _is_regular(frequency)): raise ValueError("frequency must be a regular grid") return frequency[0], frequency[1] - frequency[0], len(frequency) def validate_method(method, dy, fit_mean, nterms, frequency, assume_regular_frequency): """ Validate the method argument, and if method='auto' choose the appropriate method. """ methods = available_methods() prefer_fast = len(frequency) > 200 and ( assume_regular_frequency or _is_regular(frequency) ) prefer_scipy = "scipy" in methods and dy is None and not fit_mean # automatically choose the appropriate method if method == "auto": if nterms != 1: if prefer_fast: method = "fastchi2" else: method = "chi2" elif prefer_fast: method = "fast" elif prefer_scipy: method = "scipy" else: method = "cython" if method not in METHODS: raise ValueError(f"invalid method: {method}") return method def lombscargle( t, y, dy=None, frequency=None, method="auto", assume_regular_frequency=False, normalization="standard", fit_mean=True, center_data=True, method_kwds=None, nterms=1, ): """ Compute the Lomb-scargle Periodogram with a given method. Parameters ---------- t : array-like sequence of observation times y : array-like sequence of observations associated with times t dy : float or array-like, optional error or sequence of observational errors associated with times t frequency : array-like frequencies (not angular frequencies) at which to evaluate the periodogram. If not specified, optimal frequencies will be chosen using a heuristic which will attempt to provide sufficient frequency range and sampling so that peaks will not be missed. Note that in order to use method='fast', frequencies must be regularly spaced. method : str, optional specify the lomb scargle implementation to use. 
Options are: - 'auto': choose the best method based on the input - 'fast': use the O[N log N] fast method. Note that this requires evenly-spaced frequencies: by default this will be checked unless ``assume_regular_frequency`` is set to True. - `slow`: use the O[N^2] pure-python implementation - `chi2`: use the O[N^2] chi2/linear-fitting implementation - `fastchi2`: use the O[N log N] chi2 implementation. Note that this requires evenly-spaced frequencies: by default this will be checked unless `assume_regular_frequency` is set to True. - `scipy`: use ``scipy.signal.lombscargle``, which is an O[N^2] implementation written in C. Note that this does not support heteroskedastic errors. assume_regular_frequency : bool, optional if True, assume that the input frequency is of the form freq = f0 + df * np.arange(N). Only referenced if method is 'auto' or 'fast'. normalization : str, optional Normalization to use for the periodogram. Options are 'standard' or 'psd'. fit_mean : bool, optional if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool, optional if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if `fit_mean = False` method_kwds : dict, optional additional keywords to pass to the lomb-scargle method nterms : int, optional number of Fourier terms to use in the periodogram. Not supported with every method. Returns ------- PLS : array-like Lomb-Scargle power associated with each frequency omega """ # frequencies should be one-dimensional arrays output_shape = frequency.shape frequency = frequency.ravel() # we'll need to adjust args and kwds for each method args = (t, y, dy) kwds = dict( frequency=frequency, center_data=center_data, fit_mean=fit_mean, normalization=normalization, nterms=nterms, **(method_kwds or {}), ) method = validate_method( method, dy=dy, fit_mean=fit_mean, nterms=nterms, frequency=frequency, assume_regular_frequency=assume_regular_frequency, ) # scipy doesn't support dy or fit_mean=True if method == "scipy": if kwds.pop("fit_mean"): raise ValueError("scipy method does not support fit_mean=True") if dy is not None: dy = np.ravel(np.asarray(dy)) if not np.allclose(dy[0], dy): raise ValueError("scipy method only supports uniform uncertainties dy") args = (t, y) # fast methods require frequency expressed as a grid if method.startswith("fast"): f0, df, Nf = _get_frequency_grid( kwds.pop("frequency"), assume_regular_frequency ) kwds.update(f0=f0, df=df, Nf=Nf) # only chi2 methods support nterms if not method.endswith("chi2"): if kwds.pop("nterms") != 1: raise ValueError( "nterms != 1 only supported with 'chi2' or 'fastchi2' methods" ) PLS = METHODS[method](*args, **kwds) return PLS.reshape(output_shape)
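# Small illustration of the 'auto' selection logic in validate_method above,
# assuming it runs in the same module: more than 200 regularly-spaced frequencies
# prefer the fast method, while 'scipy' is only chosen when dy is None and
# fit_mean is False.
import numpy as np

long_grid = 0.01 + 0.01 * np.arange(500)     # regular grid, > 200 points
short_grid = np.linspace(0.1, 1.0, 50)       # regular but short grid

print(validate_method("auto", dy=None, fit_mean=True, nterms=1,
                      frequency=long_grid, assume_regular_frequency=False))   # 'fast'
print(validate_method("auto", dy=0.1, fit_mean=True, nterms=1,
                      frequency=short_grid, assume_regular_frequency=False))  # 'cython'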
"""Various implementations of the Lomb-Scargle Periodogram.""" from .chi2_impl import lombscargle_chi2 from .fast_impl import lombscargle_fast from .fastchi2_impl import lombscargle_fastchi2 from .main import available_methods, lombscargle from .scipy_impl import lombscargle_scipy from .slow_impl import lombscargle_slow
import numpy as np from .utils import trig_sum def lombscargle_fast( t, y, dy, f0, df, Nf, center_data=True, fit_mean=True, normalization="standard", use_fft=True, trig_sum_kwds=None, ): """Fast Lomb-Scargle Periodogram. This implements the Press & Rybicki method [1]_ for fast O[N log(N)] Lomb-Scargle periodograms. Parameters ---------- t, y, dy : array-like times, values, and errors of the data points. These should be broadcastable to the same shape. None should be `~astropy.units.Quantity`. f0, df, Nf : (float, float, int) parameters describing the frequency grid, f = f0 + df * arange(Nf). center_data : bool (default=True) Specify whether to subtract the mean of the data before the fit fit_mean : bool (default=True) If True, then compute the floating-mean periodogram; i.e. let the mean vary with the fit. normalization : str, optional Normalization to use for the periodogram. Options are 'standard', 'model', 'log', or 'psd'. use_fft : bool (default=True) If True, then use the Press & Rybicki O[NlogN] algorithm to compute the result. Otherwise, use a slower O[N^2] algorithm trig_sum_kwds : dict or None, optional extra keyword arguments to pass to the ``trig_sum`` utility. Options are ``oversampling`` and ``Mfft``. See documentation of ``trig_sum`` for details. Returns ------- power : ndarray Lomb-Scargle power associated with each frequency. Units of the result depend on the normalization. Notes ----- Note that the ``use_fft=True`` algorithm is an approximation to the true Lomb-Scargle periodogram, and as the number of points grows this approximation improves. On the other hand, for very small datasets (<~50 points or so) this approximation may not be useful. References ---------- .. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis of unevenly sampled data". ApJ 1:338, p277, 1989 .. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) .. [3] W. Press et al, Numerical Recipes in C (2002) """ if dy is None: dy = 1 # Validate and setup input data t, y, dy = np.broadcast_arrays(t, y, dy) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") # Validate and setup frequency grid if f0 < 0: raise ValueError("Frequencies must be positive") if df <= 0: raise ValueError("Frequency steps must be positive") if Nf <= 0: raise ValueError("Number of frequencies must be positive") w = dy**-2.0 w /= w.sum() # Center the data. Even if we're fitting the offset, # this step makes the expressions below more succinct if center_data or fit_mean: y = y - np.dot(w, y) # set up arguments to trig_sum kwargs = dict.copy(trig_sum_kwds or {}) kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf) # ---------------------------------------------------------------------- # 1. 
compute functions of the time-shift tau at each frequency Sh, Ch = trig_sum(t, w * y, **kwargs) S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs) if fit_mean: S, C = trig_sum(t, w, **kwargs) tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S)) else: tan_2omega_tau = S2 / C2 # This is what we're computing below; the straightforward way is slower # and less stable, so we use trig identities instead # # omega_tau = 0.5 * np.arctan(tan_2omega_tau) # S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau) # Sw, Cw = np.sin(omega_tau), np.cos(omega_tau) S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau) C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau) Cw = np.sqrt(0.5) * np.sqrt(1 + C2w) Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w) # ---------------------------------------------------------------------- # 2. Compute the periodogram, following Zechmeister & Kurster # and using tricks from Press & Rybicki. YY = np.dot(w, y**2) YC = Ch * Cw + Sh * Sw YS = Sh * Cw - Ch * Sw CC = 0.5 * (1 + C2 * C2w + S2 * S2w) SS = 0.5 * (1 - C2 * C2w - S2 * S2w) if fit_mean: CC -= (C * Cw + S * Sw) ** 2 SS -= (S * Cw - C * Sw) ** 2 power = YC * YC / CC + YS * YS / SS if normalization == "standard": power /= YY elif normalization == "model": power /= YY - power elif normalization == "log": power = -np.log(1 - power / YY) elif normalization == "psd": power *= 0.5 * (dy**-2.0).sum() else: raise ValueError(f"normalization='{normalization}' not recognized") return power
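# Consistency sketch for the approximation above: with use_fft=True the
# Press & Rybicki extirpolation should closely reproduce the exact O[N^2] sums
# obtained with use_fft=False.  Synthetic data; assumes this snippet runs
# alongside the definition of lombscargle_fast.
import numpy as np

rng = np.random.default_rng(1)
t = np.sort(20 * rng.random(150))
y = np.sin(2 * np.pi * 1.3 * t) + 0.2 * rng.normal(size=t.size)

f0, df, Nf = 0.05, 0.01, 400
p_fft = lombscargle_fast(t, y, dy=0.2, f0=f0, df=df, Nf=Nf, use_fft=True)
p_exact = lombscargle_fast(t, y, dy=0.2, f0=f0, df=df, Nf=Nf, use_fft=False)
print(np.max(np.abs(p_fft - p_exact)))   # expected to be small (approximation error)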
import numpy as np def lombscargle_scipy(t, y, frequency, normalization="standard", center_data=True): """Lomb-Scargle Periodogram. This is a wrapper of ``scipy.signal.lombscargle`` for computation of the Lomb-Scargle periodogram. This is a relatively fast version of the naive O[N^2] algorithm, but cannot handle heteroskedastic errors. Parameters ---------- t, y : array-like times, values, and errors of the data points. These should be broadcastable to the same shape. None should be `~astropy.units.Quantity`. frequency : array-like frequencies (not angular frequencies) at which to calculate periodogram normalization : str, optional Normalization to use for the periodogram. Options are 'standard', 'model', 'log', or 'psd'. center_data : bool, optional if True, pre-center the data by subtracting the weighted mean of the input data. Returns ------- power : array-like Lomb-Scargle power associated with each frequency. Units of the result depend on the normalization. References ---------- .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) .. [2] W. Press et al, Numerical Recipes in C (2002) .. [3] Scargle, J.D. 1982, ApJ 263:835-853 """ try: from scipy import signal except ImportError: raise ImportError("scipy must be installed to use lombscargle_scipy") t, y = np.broadcast_arrays(t, y) # Scipy requires floating-point input t = np.asarray(t, dtype=float) y = np.asarray(y, dtype=float) frequency = np.asarray(frequency, dtype=float) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") if frequency.ndim != 1: raise ValueError("frequency should be one-dimensional") if center_data: y = y - y.mean() # Note: scipy input accepts angular frequencies p = signal.lombscargle(t, y, 2 * np.pi * frequency) if normalization == "psd": pass elif normalization == "standard": p *= 2 / (t.size * np.mean(y**2)) elif normalization == "log": p = -np.log(1 - 2 * p / (t.size * np.mean(y**2))) elif normalization == "model": p /= 0.5 * t.size * np.mean(y**2) - p else: raise ValueError(f"normalization='{normalization}' not recognized") return p
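# Check of the 'standard' normalization applied above: it should equal the raw
# scipy.signal.lombscargle output rescaled by 2 / (N * mean(y**2)) after centering.
# Requires scipy; assumes the snippet runs alongside lombscargle_scipy.
import numpy as np
from scipy import signal

rng = np.random.default_rng(2)
t = np.sort(50 * rng.random(80))
y = np.cos(2 * np.pi * 0.25 * t) + 0.3 * rng.normal(size=t.size)
freq = np.linspace(0.05, 1.0, 200)

p_std = lombscargle_scipy(t, y, freq, normalization="standard")
yc = y - y.mean()
p_raw = signal.lombscargle(t, yc, 2 * np.pi * freq)
print(np.allclose(p_std, p_raw * 2 / (t.size * np.mean(yc**2))))   # expected True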
from math import factorial import numpy as np def bitceil(N): """ Find the bit (i.e. power of 2) immediately greater than or equal to N Note: this works for numbers up to 2 ** 64. Roughly equivalent to int(2 ** np.ceil(np.log2(N))). """ return 1 << int(N - 1).bit_length() def extirpolate(x, y, N=None, M=4): """ Extirpolate the values (x, y) onto an integer grid range(N), using lagrange polynomial weights on the M nearest points. Parameters ---------- x : array-like array of abscissas y : array-like array of ordinates N : int number of integer bins to use. For best performance, N should be larger than the maximum of x M : int number of adjoining points on which to extirpolate. Returns ------- yN : ndarray N extirpolated values associated with range(N) Examples -------- >>> rng = np.random.default_rng(0) >>> x = 100 * rng.random(20) >>> y = np.sin(x) >>> y_hat = extirpolate(x, y) >>> x_hat = np.arange(len(y_hat)) >>> f = lambda x: np.sin(x / 10) >>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat))) True Notes ----- This code is based on the C implementation of spread() presented in Numerical Recipes in C, Second Edition (Press et al. 1989; p.583). """ x, y = map(np.ravel, np.broadcast_arrays(x, y)) if N is None: N = int(np.max(x) + 0.5 * M + 1) # Now use legendre polynomial weights to populate the results array; # This is an efficient recursive implementation (See Press et al. 1989) result = np.zeros(N, dtype=y.dtype) # first take care of the easy cases where x is an integer integers = x % 1 == 0 np.add.at(result, x[integers].astype(int), y[integers]) x, y = x[~integers], y[~integers] # For each remaining x, find the index describing the extirpolation range. # i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center, # adjusted so that the limits are within the range 0...N ilo = np.clip((x - M // 2).astype(int), 0, N - M) numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0) denominator = factorial(M - 1) for j in range(M): if j > 0: denominator *= j / (j - M) ind = ilo + (M - 1 - j) np.add.at(result, ind, numerator / (denominator * (x - ind))) return result def trig_sum(t, h, df, N, f0=0, freq_factor=1, oversampling=5, use_fft=True, Mfft=4): """Compute (approximate) trigonometric sums for a number of frequencies. This routine computes weighted sine and cosine sums:: S_j = sum_i { h_i * sin(2 pi * f_j * t_i) } C_j = sum_i { h_i * cos(2 pi * f_j * t_i) } Where f_j = freq_factor * (f0 + j * df) for the values j in 1 ... N. The sums can be computed either by a brute force O[N^2] method, or by an FFT-based O[Nlog(N)] method. Parameters ---------- t : array-like array of input times h : array-like array weights for the sum df : float frequency spacing N : int number of frequency bins to return f0 : float, optional The low frequency to use freq_factor : float, optional Factor which multiplies the frequency use_fft : bool if True, use the approximate FFT algorithm to compute the result. This uses the FFT with Press & Rybicki's Lagrangian extirpolation. oversampling : int (default = 5) oversampling freq_factor for the approximation; roughly the number of time samples across the highest-frequency sinusoid. This parameter contains the trade-off between accuracy and speed. Not referenced if use_fft is False. Mfft : int The number of adjacent points to use in the FFT approximation. Not referenced if use_fft is False. 
Returns ------- S, C : ndarray summation arrays for frequencies f = df * np.arange(1, N + 1) """ df *= freq_factor f0 *= freq_factor if df <= 0: raise ValueError("df must be positive") t, h = map(np.ravel, np.broadcast_arrays(t, h)) if use_fft: Mfft = int(Mfft) if Mfft <= 0: raise ValueError("Mfft must be positive") # required size of fft is the power of 2 above the oversampling rate Nfft = bitceil(N * oversampling) t0 = t.min() if f0 > 0: h = h * np.exp(2j * np.pi * f0 * (t - t0)) tnorm = ((t - t0) * Nfft * df) % Nfft grid = extirpolate(tnorm, h, Nfft, Mfft) fftgrid = np.fft.ifft(grid)[:N] if t0 != 0: f = f0 + df * np.arange(N) fftgrid *= np.exp(2j * np.pi * t0 * f) C = Nfft * fftgrid.real S = Nfft * fftgrid.imag else: f = f0 + df * np.arange(N) C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis])) S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis])) return S, C
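# Quick agreement check for trig_sum above: the FFT/extirpolation path should
# match the direct O[N^2] evaluation to high accuracy for the default settings.
# Synthetic data; assumes the snippet runs alongside the definitions above.
import numpy as np

rng = np.random.default_rng(3)
t = np.sort(10 * rng.random(100))
h = rng.normal(size=t.size)

S_fft, C_fft = trig_sum(t, h, df=0.02, N=256, f0=0.01, use_fft=True)
S_dir, C_dir = trig_sum(t, h, df=0.02, N=256, f0=0.01, use_fft=False)
print(np.max(np.abs(S_fft - S_dir)), np.max(np.abs(C_fft - C_dir)))   # both small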
import numpy as np def lombscargle_slow( t, y, dy, frequency, normalization="standard", fit_mean=True, center_data=True ): """Lomb-Scargle Periodogram. This is a pure-python implementation of the original Lomb-Scargle formalism (e.g. [1]_, [2]_), with the addition of the floating mean (e.g. [3]_) Parameters ---------- t, y, dy : array-like times, values, and errors of the data points. These should be broadcastable to the same shape. None should be `~astropy.units.Quantity`. frequency : array-like frequencies (not angular frequencies) at which to calculate periodogram normalization : str, optional Normalization to use for the periodogram. Options are 'standard', 'model', 'log', or 'psd'. fit_mean : bool, optional if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool, optional if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if ``fit_mean = False`` Returns ------- power : array-like Lomb-Scargle power associated with each frequency. Units of the result depend on the normalization. References ---------- .. [1] W. Press et al, Numerical Recipes in C (2002) .. [2] Scargle, J.D. 1982, ApJ 263:835-853 .. [3] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) """ if dy is None: dy = 1 t, y, dy = np.broadcast_arrays(t, y, dy) frequency = np.asarray(frequency) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") if frequency.ndim != 1: raise ValueError("frequency should be one-dimensional") w = dy**-2.0 w /= w.sum() # if fit_mean is true, centering the data now simplifies the math below. if fit_mean or center_data: y = y - np.dot(w, y) omega = 2 * np.pi * frequency omega = omega.ravel()[np.newaxis, :] # make following arrays into column vectors t, y, dy, w = map(lambda x: x[:, np.newaxis], (t, y, dy, w)) sin_omega_t = np.sin(omega * t) cos_omega_t = np.cos(omega * t) # compute time-shift tau # S2 = np.dot(w.T, np.sin(2 * omega * t) S2 = 2 * np.dot(w.T, sin_omega_t * cos_omega_t) # C2 = np.dot(w.T, np.cos(2 * omega * t) C2 = 2 * np.dot(w.T, 0.5 - sin_omega_t**2) if fit_mean: S = np.dot(w.T, sin_omega_t) C = np.dot(w.T, cos_omega_t) S2 -= 2 * S * C C2 -= C * C - S * S # compute components needed for the fit omega_t_tau = omega * t - 0.5 * np.arctan2(S2, C2) sin_omega_t_tau = np.sin(omega_t_tau) cos_omega_t_tau = np.cos(omega_t_tau) Y = np.dot(w.T, y) wy = w * y YCtau = np.dot(wy.T, cos_omega_t_tau) YStau = np.dot(wy.T, sin_omega_t_tau) CCtau = np.dot(w.T, cos_omega_t_tau * cos_omega_t_tau) SStau = np.dot(w.T, sin_omega_t_tau * sin_omega_t_tau) if fit_mean: Ctau = np.dot(w.T, cos_omega_t_tau) Stau = np.dot(w.T, sin_omega_t_tau) YCtau -= Y * Ctau YStau -= Y * Stau CCtau -= Ctau * Ctau SStau -= Stau * Stau p = YCtau * YCtau / CCtau + YStau * YStau / SStau YY = np.dot(w.T, y * y) if normalization == "standard": p /= YY elif normalization == "model": p /= YY - p elif normalization == "log": p = -np.log(1 - p / YY) elif normalization == "psd": p *= 0.5 * (dy**-2.0).sum() else: raise ValueError(f"normalization='{normalization}' not recognized") return p.ravel()
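# Sketch of the 'standard' normalization above: for noise-free sinusoidal data the
# power at the true frequency is 1, since the single-term model absorbs all of the
# (weighted) variance.  Assumes the snippet runs alongside lombscargle_slow.
import numpy as np

rng = np.random.default_rng(4)
t = np.sort(40 * rng.random(120))
y = 0.7 * np.sin(2 * np.pi * 0.5 * t + 0.3)

p = lombscargle_slow(t, y, dy=1.0, frequency=np.array([0.5]),
                     normalization="standard")
print(p)   # expected to be ~1 up to numerical precision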
import numpy as np from .utils import trig_sum def lombscargle_fastchi2( t, y, dy, f0, df, Nf, normalization="standard", fit_mean=True, center_data=True, nterms=1, use_fft=True, trig_sum_kwds=None, ): """Lomb-Scargle Periodogram. This implements a fast chi-squared periodogram using the algorithm outlined in [4]_. The result is identical to the standard Lomb-Scargle periodogram. The advantage of this algorithm is the ability to compute multiterm periodograms relatively quickly. Parameters ---------- t, y, dy : array-like times, values, and errors of the data points. These should be broadcastable to the same shape. None should be `~astropy.units.Quantity`. f0, df, Nf : (float, float, int) parameters describing the frequency grid, f = f0 + df * arange(Nf). normalization : str, optional Normalization to use for the periodogram. Options are 'standard', 'model', 'log', or 'psd'. fit_mean : bool, optional if True, include a constant offset as part of the model at each frequency. This can lead to more accurate results, especially in the case of incomplete phase coverage. center_data : bool, optional if True, pre-center the data by subtracting the weighted mean of the input data. This is especially important if ``fit_mean = False`` nterms : int, optional Number of Fourier terms in the fit Returns ------- power : array-like Lomb-Scargle power associated with each frequency. Units of the result depend on the normalization. References ---------- .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009) .. [2] W. Press et al, Numerical Recipes in C (2002) .. [3] Scargle, J.D. ApJ 263:835-853 (1982) .. [4] Palmer, J. ApJ 695:496-502 (2009) """ if nterms == 0 and not fit_mean: raise ValueError("Cannot have nterms = 0 without fitting bias") if dy is None: dy = 1 # Validate and setup input data t, y, dy = np.broadcast_arrays(t, y, dy) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") # Validate and setup frequency grid if f0 < 0: raise ValueError("Frequencies must be positive") if df <= 0: raise ValueError("Frequency steps must be positive") if Nf <= 0: raise ValueError("Number of frequencies must be positive") w = dy**-2.0 ws = np.sum(w) # if fit_mean is true, centering the data now simplifies the math below. if center_data or fit_mean: y = y - np.dot(w, y) / ws yw = y / dy chi2_ref = np.dot(yw, yw) kwargs = dict.copy(trig_sum_kwds or {}) kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf) # Here we build-up the matrices XTX and XTy using pre-computed # sums. 
The relevant identities are # 2 sin(mx) sin(nx) = cos(m-n)x - cos(m+n)x # 2 cos(mx) cos(nx) = cos(m-n)x + cos(m+n)x # 2 sin(mx) cos(nx) = sin(m-n)x + sin(m+n)x yws = np.sum(y * w) SCw = [(np.zeros(Nf), ws * np.ones(Nf))] SCw.extend( [trig_sum(t, w, freq_factor=i, **kwargs) for i in range(1, 2 * nterms + 1)] ) Sw, Cw = zip(*SCw) SCyw = [(np.zeros(Nf), yws * np.ones(Nf))] SCyw.extend( [trig_sum(t, w * y, freq_factor=i, **kwargs) for i in range(1, nterms + 1)] ) Syw, Cyw = zip(*SCyw) # Now create an indexing scheme so we can quickly # build-up matrices at each frequency order = [("C", 0)] if fit_mean else [] order.extend(sum(([("S", i), ("C", i)] for i in range(1, nterms + 1)), [])) funcs = dict( S=lambda m, i: Syw[m][i], C=lambda m, i: Cyw[m][i], SS=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] - Cw[m + n][i]), CC=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] + Cw[m + n][i]), SC=lambda m, n, i: 0.5 * (np.sign(m - n) * Sw[abs(m - n)][i] + Sw[m + n][i]), CS=lambda m, n, i: 0.5 * (np.sign(n - m) * Sw[abs(n - m)][i] + Sw[n + m][i]), ) def compute_power(i): XTX = np.array( [[funcs[A[0] + B[0]](A[1], B[1], i) for A in order] for B in order] ) XTy = np.array([funcs[A[0]](A[1], i) for A in order]) return np.dot(XTy.T, np.linalg.solve(XTX, XTy)) p = np.array([compute_power(i) for i in range(Nf)]) if normalization == "psd": p *= 0.5 elif normalization == "standard": p /= chi2_ref elif normalization == "log": p = -np.log(1 - p / chi2_ref) elif normalization == "model": p /= chi2_ref - p else: raise ValueError(f"normalization='{normalization}' not recognized") return p
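# Cross-check sketch: with exact trigonometric sums (use_fft=False) the result of
# lombscargle_fastchi2 should agree with the direct chi2 implementation, which the
# chi2_impl module describes as a validation reference.  The relative import
# assumes this snippet lives in the same package; data are synthetic.
import numpy as np

from .chi2_impl import lombscargle_chi2

rng = np.random.default_rng(5)
t = np.sort(25 * rng.random(90))
dy = 0.2 * np.ones_like(t)
y = (np.sin(2 * np.pi * 0.6 * t) + 0.5 * np.sin(2 * np.pi * 1.2 * t)
     + dy * rng.normal(size=t.size))

f0, df, Nf = 0.05, 0.01, 200
p_sum = lombscargle_fastchi2(t, y, dy, f0, df, Nf, nterms=2, use_fft=False)
p_direct = lombscargle_chi2(t, y, dy, f0 + df * np.arange(Nf), nterms=2)
print(np.allclose(p_sum, p_direct))   # expected True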
import numpy as np def design_matrix(t, frequency, dy=None, bias=True, nterms=1): """Compute the Lomb-Scargle design matrix at the given frequency. This is the matrix X such that the periodic model at the given frequency can be expressed :math:`\\hat{y} = X \\theta`. Parameters ---------- t : array-like, shape=(n_times,) times at which to compute the design matrix frequency : float frequency for the design matrix dy : float or array-like, optional data uncertainties: should be broadcastable with `t` bias : bool (default=True) If true, include a bias column in the matrix nterms : int (default=1) Number of Fourier terms to include in the model Returns ------- X : ndarray, shape=(n_times, n_parameters) The design matrix, where n_parameters = bool(bias) + 2 * nterms """ t = np.asarray(t) frequency = np.asarray(frequency) if t.ndim != 1: raise ValueError("t should be one dimensional") if frequency.ndim != 0: raise ValueError("frequency must be a scalar") if nterms == 0 and not bias: raise ValueError("cannot have nterms=0 and no bias") if bias: cols = [np.ones_like(t)] else: cols = [] for i in range(1, nterms + 1): cols.append(np.sin(2 * np.pi * i * frequency * t)) cols.append(np.cos(2 * np.pi * i * frequency * t)) XT = np.vstack(cols) if dy is not None: XT /= dy return np.transpose(XT) def periodic_fit(t, y, dy, frequency, t_fit, center_data=True, fit_mean=True, nterms=1): """Compute the Lomb-Scargle model fit at a given frequency. Parameters ---------- t, y, dy : float or array-like The times, observations, and uncertainties to fit frequency : float The frequency at which to compute the model t_fit : float or array-like The times at which the fit should be computed center_data : bool (default=True) If True, center the input data before applying the fit fit_mean : bool (default=True) If True, include the bias as part of the model nterms : int (default=1) The number of Fourier terms to include in the fit Returns ------- y_fit : ndarray The model fit evaluated at each value of t_fit """ t, y, frequency = map(np.asarray, (t, y, frequency)) if dy is None: dy = np.ones_like(y) else: dy = np.asarray(dy) t_fit = np.asarray(t_fit) if t.ndim != 1: raise ValueError("t, y, dy should be one dimensional") if t_fit.ndim != 1: raise ValueError("t_fit should be one dimensional") if frequency.ndim != 0: raise ValueError("frequency should be a scalar") if center_data: w = dy**-2.0 y_mean = np.dot(y, w) / w.sum() y = y - y_mean else: y_mean = 0 X = design_matrix(t, frequency, dy=dy, bias=fit_mean, nterms=nterms) theta_MLE = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y / dy)) X_fit = design_matrix(t_fit, frequency, bias=fit_mean, nterms=nterms) return y_mean + np.dot(X_fit, theta_MLE)
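# Shape and behaviour sketch for the helpers above: the design matrix has
# bool(bias) + 2 * nterms columns, and periodic_fit exactly reproduces a noiseless
# sinusoid at its true frequency.  Assumes the snippet runs alongside the
# definitions above.
import numpy as np

t = np.linspace(0, 10, 50)
y = 1.0 + 0.5 * np.sin(2 * np.pi * 0.4 * t)

X = design_matrix(t, 0.4, bias=True, nterms=2)
print(X.shape)                          # (50, 5): 1 bias + 2 * 2 Fourier columns

t_fit = np.linspace(0, 10, 200)
y_fit = periodic_fit(t, y, dy=None, frequency=0.4, t_fit=t_fit)
print(np.allclose(y_fit, 1.0 + 0.5 * np.sin(2 * np.pi * 0.4 * t_fit)))   # True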
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Convenience functions for `astropy.cosmology`. """ import warnings import numpy as np from astropy.cosmology import units as cu from astropy.cosmology.core import CosmologyError from astropy.units import Quantity from astropy.utils.exceptions import AstropyUserWarning __all__ = ["z_at_value"] __doctest_requires__ = {"*": ["scipy"]} def _z_at_scalar_value( func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500, method="Brent", bracket=None, verbose=False, ): """ Find the redshift ``z`` at which ``func(z) = fval``. See :func:`astropy.cosmology.funcs.z_at_value`. """ from scipy.optimize import minimize_scalar opt = {"maxiter": maxfun, "xtol": ztol} # Assume custom methods support the same options as default; otherwise user # will see warnings. if callable(method): # can skip callables pass elif str(method).lower() == "bounded": opt["xatol"] = opt.pop("xtol") if bracket is not None: warnings.warn(f"Option 'bracket' is ignored by method {method}.") bracket = None # fval falling inside the interval of bracketing function values does not # guarantee it has a unique solution, but for Standard Cosmological # quantities normally should (being monotonic or having a single extremum). # In these cases keep solver from returning solutions outside of bracket. fval_zmin, fval_zmax = func(zmin), func(zmax) nobracket = False if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval): if bracket is None: nobracket = True else: fval_brac = func(np.asanyarray(bracket)) if np.sign(fval - fval_brac[0]) != np.sign(fval_brac[-1] - fval): nobracket = True else: zmin, zmax = bracket[0], bracket[-1] fval_zmin, fval_zmax = fval_brac[[0, -1]] if nobracket: warnings.warn( f"fval is not bracketed by func(zmin)={fval_zmin} and " f"func(zmax)={fval_zmax}. This means either there is no " "solution, or that there is more than one solution " "between zmin and zmax satisfying fval = func(z).", AstropyUserWarning, ) if isinstance(fval_zmin, Quantity): val = fval.to_value(fval_zmin.unit) else: val = fval # Construct bounds (Brent and Golden fail if bounds are not None) if callable(method) or str(method).lower() not in {"brent", "golden"}: bounds = (zmin, zmax) else: bounds = None # Objective function to minimize. # 'Brent' and 'Golden' ignore `bounds` but this keeps the domain witihin the bounds. def f(z): if z > zmax: return 1.0e300 * (1.0 + z - zmax) elif z < zmin: return 1.0e300 * (1.0 + zmin - z) elif isinstance(fval_zmin, Quantity): return abs(func(z).value - val) else: return abs(func(z) - val) # Perform the minimization res = minimize_scalar(f, method=method, bounds=bounds, bracket=bracket, options=opt) # Scipy docs state that `OptimizeResult` always has 'status' and 'message' # attributes, but only `_minimize_scalar_bounded()` seems to have really # implemented them. if not res.success: warnings.warn( f"Solver returned {res.get('status')}:" f" {res.get('message', 'Unsuccessful')}\nPrecision {res.fun} reached after" f" {res.nfev} function calls.", AstropyUserWarning, ) if verbose: print(res) if np.allclose(res.x, zmax): raise CosmologyError( f"Best guess z={res.x} is very close to the upper z limit {zmax}." "\nTry re-running with a different zmax." ) elif np.allclose(res.x, zmin): raise CosmologyError( f"Best guess z={res.x} is very close to the lower z limit {zmin}." "\nTry re-running with a different zmin." 
) return res.x def z_at_value( func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500, method="Brent", bracket=None, verbose=False, ): """Find the redshift ``z`` at which ``func(z) = fval``. This finds the redshift at which one of the cosmology functions or methods (for example Planck13.distmod) is equal to a known value. .. warning:: Make sure you understand the behavior of the function that you are trying to invert! Depending on the cosmology, there may not be a unique solution. For example, in the standard Lambda CDM cosmology, there are two redshifts which give an angular diameter distance of 1500 Mpc, z ~ 0.7 and z ~ 3.8. To force ``z_at_value`` to find the solution you are interested in, use the ``zmin`` and ``zmax`` keywords to limit the search range (see the example below). Parameters ---------- func : function or method A function that takes a redshift as input. fval : `~astropy.units.Quantity` The (scalar or array) value of ``func(z)`` to recover. zmin : float or array-like['dimensionless'] or quantity-like, optional The lower search limit for ``z``. Beware of divergences in some cosmological functions, such as distance moduli, at z=0 (default 1e-8). zmax : float or array-like['dimensionless'] or quantity-like, optional The upper search limit for ``z`` (default 1000). ztol : float or array-like['dimensionless'], optional The relative error in ``z`` acceptable for convergence. maxfun : int or array-like, optional The maximum number of function evaluations allowed in the optimization routine (default 500). method : str or callable, optional Type of solver to pass to the minimizer. The built-in options provided by :func:`~scipy.optimize.minimize_scalar` are 'Brent' (default), 'Golden' and 'Bounded' with names case insensitive - see documentation there for details. It also accepts a custom solver by passing any user-provided callable object that meets the requirements listed therein under the Notes on "Custom minimizers" - or in more detail in :doc:`scipy:tutorial/optimize` - although their use is currently untested. .. versionadded:: 4.3 bracket : sequence or object array[sequence], optional For methods 'Brent' and 'Golden', ``bracket`` defines the bracketing interval and can either have three items (z1, z2, z3) so that z1 < z2 < z3 and ``func(z2) < func (z1), func(z3)`` or two items z1 and z3 which are assumed to be a starting interval for a downhill bracket search. For non-monotonic functions such as angular diameter distance this may be used to start the search on the desired side of the maximum, but see Examples below for usage notes. .. versionadded:: 4.3 verbose : bool, optional Print diagnostic output from solver (default `False`). .. versionadded:: 4.3 Returns ------- z : `~astropy.units.Quantity` ['redshift'] The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) = fval`` within ``ztol``. Has units of cosmological redshift. Warns ----- :class:`~astropy.utils.exceptions.AstropyUserWarning` If ``fval`` is not bracketed by ``func(zmin)=fval(zmin)`` and ``func(zmax)=fval(zmax)``. If the solver was not successful. Raises ------ :class:`astropy.cosmology.CosmologyError` If the result is very close to either ``zmin`` or ``zmax``. ValueError If ``bracket`` is not an array nor a 2 (or 3) element sequence. TypeError If ``bracket`` is not an object array. 2 (or 3) element sequences will be turned into object arrays, so this error should only occur if a non-object array is used for ``bracket``. 
Notes ----- This works for any arbitrary input cosmology, but is inefficient if you want to invert a large number of values for the same cosmology. In this case, it is faster to instead generate an array of values at many closely-spaced redshifts that cover the relevant redshift range, and then use interpolation to find the redshift at each value you are interested in. For example, to efficiently find the redshifts corresponding to 10^6 values of the distance modulus in a Planck13 cosmology, you could do the following: >>> import astropy.units as u >>> from astropy.cosmology import Planck13, z_at_value Generate 10^6 distance moduli between 24 and 44 for which we want to find the corresponding redshifts: >>> Dvals = (24 + np.random.rand(1000000) * 20) * u.mag Make a grid of distance moduli covering the redshift range we need using 50 equally log-spaced values between zmin and zmax. We use log spacing to adequately sample the steep part of the curve at low distance moduli: >>> zmin = z_at_value(Planck13.distmod, Dvals.min()) >>> zmax = z_at_value(Planck13.distmod, Dvals.max()) >>> zgrid = np.geomspace(zmin, zmax, 50) >>> Dgrid = Planck13.distmod(zgrid) Finally interpolate to find the redshift at each distance modulus: >>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid) Examples -------- >>> import astropy.units as u >>> from astropy.cosmology import Planck13, Planck18, z_at_value The age and lookback time are monotonic with redshift, and so a unique solution can be found: >>> z_at_value(Planck13.age, 2 * u.Gyr) # doctest: +FLOAT_CMP <Quantity 3.19812268 redshift> The angular diameter is not monotonic however, and there are two redshifts that give a value of 1500 Mpc. You can use the zmin and zmax keywords to find the one you are interested in: >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, zmax=1.5) # doctest: +FLOAT_CMP <Quantity 0.68044452 redshift> >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, zmin=2.5) # doctest: +FLOAT_CMP <Quantity 3.7823268 redshift> Alternatively the ``bracket`` option may be used to initialize the function solver on a desired region, but one should be aware that this does not guarantee it will remain close to this starting bracket. For the example of angular diameter distance, which has a maximum near a redshift of 1.6 in this cosmology, defining a bracket on either side of this maximum will often return a solution on the same side: >>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc, ... method="Brent", bracket=(1.0, 1.2)) # doctest: +FLOAT_CMP +IGNORE_WARNINGS <Quantity 0.68044452 redshift> But this is not ascertained especially if the bracket is chosen too wide and/or too close to the turning point: >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, bracket=(0.1, 1.5)) # doctest: +SKIP <Quantity 3.7823268 redshift> # doctest: +SKIP Likewise, even for the same minimizer and same starting conditions different results can be found depending on architecture or library versions: >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP <Quantity 3.7823268 redshift> # doctest: +SKIP >>> z_at_value(Planck18.angular_diameter_distance, ... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP <Quantity 0.68044452 redshift> # doctest: +SKIP It is therefore generally safer to use the 3-parameter variant to ensure the solution stays within the bracketing limits: >>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc, method="Brent", ... 
bracket=(0.1, 1.0, 1.5)) # doctest: +FLOAT_CMP <Quantity 0.68044452 redshift> Also note that the luminosity distance and distance modulus (two other commonly inverted quantities) are monotonic in flat and open universes, but not in closed universes. All the arguments except ``func``, ``method`` and ``verbose`` accept array inputs. This does NOT use interpolation tables or any method to speed up evaluations, rather providing a convenient means to broadcast arguments over an element-wise scalar evaluation. The most common use case for non-scalar input is to evaluate 'func' for an array of ``fval``: >>> z_at_value(Planck13.age, [2, 7] * u.Gyr) # doctest: +FLOAT_CMP <Quantity [3.19812061, 0.75620443] redshift> ``fval`` can be any shape: >>> z_at_value(Planck13.age, [[2, 7], [1, 3]]*u.Gyr) # doctest: +FLOAT_CMP <Quantity [[3.19812061, 0.75620443], [5.67661227, 2.19131955]] redshift> Other arguments can be arrays. For non-monotic functions -- for example, the angular diameter distance -- this can be useful to find all solutions. >>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc, ... zmin=[0, 2.5], zmax=[2, 4]) # doctest: +FLOAT_CMP <Quantity [0.68127747, 3.79149062] redshift> The ``bracket`` argument can likewise be be an array. However, since bracket must already be a sequence (or None), it MUST be given as an object `numpy.ndarray`. Importantly, the depth of the array must be such that each bracket subsequence is an object. Errors or unexpected results will happen otherwise. A convenient means to ensure the right depth is by including a length-0 tuple as a bracket and then truncating the object array to remove the placeholder. This can be seen in the following example: >>> bracket=np.array([(1.0, 1.2),(2.0, 2.5), ()], dtype=object)[:-1] >>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc, ... bracket=bracket) # doctest: +SKIP <Quantity [0.68044452, 3.7823268] redshift> """ # `fval` can be a Quantity, which isn't (yet) compatible w/ `numpy.nditer` # so we strip it of units for broadcasting and restore the units when # passing the elements to `_z_at_scalar_value`. fval = np.asanyarray(fval) unit = getattr(fval, "unit", 1) # can be unitless zmin = Quantity(zmin, cu.redshift).value # must be unitless zmax = Quantity(zmax, cu.redshift).value # bracket must be an object array (assumed to be correct) or a 'scalar' # bracket: 2 or 3 elt sequence if not isinstance(bracket, np.ndarray): # 'scalar' bracket if bracket is not None and len(bracket) not in (2, 3): raise ValueError( "`bracket` is not an array nor a 2 (or 3) element sequence." ) else: # munge bracket into a 1-elt object array bracket = np.array([bracket, ()], dtype=object)[:1].squeeze() if bracket.dtype != np.object_: raise TypeError(f"`bracket` has dtype {bracket.dtype}, not 'O'") # make multi-dimensional iterator for all but `method`, `verbose` with np.nditer( [fval, zmin, zmax, ztol, maxfun, bracket, None], flags=["refs_ok"], op_flags=[ *[["readonly"]] * 6, # ← inputs output ↓ ["writeonly", "allocate", "no_subtype"], ], op_dtypes=(*(None,) * 6, fval.dtype), casting="no", ) as it: for fv, zmn, zmx, zt, mfe, bkt, zs in it: # ← eltwise unpack & eval ↓ zs[...] = _z_at_scalar_value( func, fv * unit, zmin=zmn, zmax=zmx, ztol=zt, maxfun=mfe, bracket=bkt.item(), # not broadcasted method=method, verbose=verbose, ) # since bracket is an object array, the output will be too, so it is # cast to the same type as the function value. result = it.operands[-1] # zs return result << cu.redshift
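# Minimal sketch of the scalar inversion technique wrapped by _z_at_scalar_value
# above: minimize |func(z) - fval| with scipy.optimize.minimize_scalar.  This is an
# illustration only, omitting the unit handling, bracketing checks, and warnings of
# the real implementation.
from scipy.optimize import minimize_scalar

import astropy.units as u
from astropy.cosmology import Planck18

fval = 10 * u.Gyr
res = minimize_scalar(
    lambda z: abs((Planck18.age(z) - fval).value),
    method="bounded", bounds=(1e-8, 10), options={"xatol": 1e-8},
)
print(res.x)   # redshift where Planck18.age(z) ~ 10 Gyr (roughly z ~ 0.3)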
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ The following are private functions, included here **FOR REFERENCE ONLY** since the io registry cannot be displayed. These functions are registered into :meth:`~astropy.cosmology.Cosmology.to_format` and :meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed via these methods. """ # this is shown in the docs. import abc import copy import inspect import numpy as np from astropy.cosmology.connect import convert_registry from astropy.cosmology.core import Cosmology from astropy.modeling import FittableModel, Model from astropy.utils.decorators import classproperty from .utils import convert_parameter_to_model_parameter __all__ = [] # nothing is publicly scoped class _CosmologyModel(FittableModel): """Base class for Cosmology redshift-method Models. .. note:: This class is not publicly scoped so should not be used directly. Instead, from a Cosmology instance use ``.to_format("astropy.model")`` to create an instance of a subclass of this class. `_CosmologyModel` (subclasses) wrap a redshift-method of a :class:`~astropy.cosmology.Cosmology` class, converting each non-`None` |Cosmology| :class:`~astropy.cosmology.Parameter` to a :class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter` and the redshift-method to the model's ``__call__ / evaluate``. See Also -------- astropy.cosmology.Cosmology.to_format """ @abc.abstractmethod def _cosmology_class(self): """Cosmology class as a private attribute. Set in subclasses.""" @abc.abstractmethod def _method_name(self): """Cosmology method name as a private attribute. Set in subclasses.""" @classproperty def cosmology_class(cls): """|Cosmology| class.""" return cls._cosmology_class @property def cosmology(self): """Return |Cosmology| using `~astropy.modeling.Parameter` values.""" cosmo = self._cosmology_class( name=self.name, **{ k: (v.value if not (v := getattr(self, k)).unit else v.quantity) for k in self.param_names }, ) return cosmo @classproperty def method_name(self): """Redshift-method name on |Cosmology| instance.""" return self._method_name # --------------------------------------------------------------- def evaluate(self, *args, **kwargs): """Evaluate method {method!r} of {cosmo_cls!r} Cosmology. The Model wraps the :class:`~astropy.cosmology.Cosmology` method, converting each |Cosmology| :class:`~astropy.cosmology.Parameter` to a :class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter` (unless the Parameter is None, in which case it is skipped). Here an instance of the cosmology is created using the current Parameter values and the method is evaluated given the input. Parameters ---------- *args, **kwargs The first ``n_inputs`` of ``*args`` are for evaluating the method of the cosmology. The remaining args and kwargs are passed to the cosmology class constructor. Any unspecified Cosmology Parameter use the current value of the corresponding Model Parameter. Returns ------- Any Results of evaluating the Cosmology method. """ # create BoundArgument with all available inputs beyond the Parameters, # which will be filled in next ba = self.cosmology_class._init_signature.bind_partial( *args[self.n_inputs :], **kwargs ) # fill in missing Parameters for k in self.param_names: if k not in ba.arguments: v = getattr(self, k) ba.arguments[k] = v.value if not v.unit else v.quantity # unvectorize, since Cosmology is not vectorized # TODO! remove when vectorized if np.shape(ba.arguments[k]): # only in __call__ # m_nu is a special case # TODO! 
fix by making it 'structured' if k == "m_nu" and len(ba.arguments[k].shape) == 1: continue ba.arguments[k] = ba.arguments[k][0] # make instance of cosmology cosmo = self._cosmology_class(**ba.arguments) # evaluate method result = getattr(cosmo, self._method_name)(*args[: self.n_inputs]) return result ############################################################################## def from_model(model): """Load |Cosmology| from `~astropy.modeling.Model` object. Parameters ---------- model : `_CosmologyModel` subclass instance See ``Cosmology.to_format.help("astropy.model") for details. Returns ------- `~astropy.cosmology.Cosmology` subclass instance Examples -------- >>> from astropy.cosmology import Cosmology, Planck18 >>> model = Planck18.to_format("astropy.model", method="lookback_time") >>> Cosmology.from_format(model) FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897) """ cosmology = model.cosmology_class meta = copy.deepcopy(model.meta) # assemble the Parameters params = {} for n in model.param_names: p = getattr(model, n) params[p.name] = p.quantity if p.unit else p.value # put all attributes in a dict meta[p.name] = { n: getattr(p, n) for n in dir(p) if not (n.startswith("_") or callable(getattr(p, n))) } ba = cosmology._init_signature.bind(name=model.name, **params, meta=meta) return cosmology(*ba.args, **ba.kwargs) def to_model(cosmology, *_, method): """Convert a `~astropy.cosmology.Cosmology` to a `~astropy.modeling.Model`. Parameters ---------- cosmology : `~astropy.cosmology.Cosmology` subclass instance method : str, keyword-only The name of the method on the ``cosmology``. Returns ------- `_CosmologyModel` subclass instance The Model wraps the |Cosmology| method, converting each non-`None` :class:`~astropy.cosmology.Parameter` to a :class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter` and the method to the model's ``__call__ / evaluate``. Examples -------- >>> from astropy.cosmology import Planck18 >>> model = Planck18.to_format("astropy.model", method="lookback_time") >>> model <FlatLambdaCDMCosmologyLookbackTimeModel(H0=67.66 km / (Mpc s), Om0=0.30966, Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. , 0. , 0.06] eV, Ob0=0.04897, name='Planck18')> """ cosmo_cls = cosmology.__class__ # get bound method & sig from cosmology (unbound if class). 
if not hasattr(cosmology, method): raise AttributeError(f"{method} is not a method on {cosmology.__class__}.") func = getattr(cosmology, method) if not callable(func): raise ValueError(f"{cosmology.__class__}.{method} is not callable.") msig = inspect.signature(func) # introspect for number of positional inputs, ignoring "self" n_inputs = len([p for p in tuple(msig.parameters.values()) if (p.kind in (0, 1))]) attrs = {} # class attributes attrs["_cosmology_class"] = cosmo_cls attrs["_method_name"] = method attrs["n_inputs"] = n_inputs attrs["n_outputs"] = 1 params = {} # Parameters (also class attributes) for n in cosmology.__parameters__: v = getattr(cosmology, n) # parameter value if v is None: # skip unspecified parameters continue # add as Model Parameter params[n] = convert_parameter_to_model_parameter( getattr(cosmo_cls, n), v, cosmology.meta.get(n) ) # class name is cosmology name + Cosmology + method name + Model clsname = ( cosmo_cls.__qualname__.replace(".", "_") + "Cosmology" + method.replace("_", " ").title().replace(" ", "") + "Model" ) # make Model class CosmoModel = type(clsname, (_CosmologyModel,), {**attrs, **params}) # override __signature__ and format the doc. CosmoModel.evaluate.__signature__ = msig CosmoModel.evaluate.__doc__ = CosmoModel.evaluate.__doc__.format( cosmo_cls=cosmo_cls.__qualname__, method=method ) # instantiate class using default values ps = {n: getattr(cosmology, n) for n in params.keys()} model = CosmoModel(**ps, name=cosmology.name, meta=copy.deepcopy(cosmology.meta)) return model def model_identify(origin, format, *args, **kwargs): """Identify if object uses the :class:`~astropy.modeling.Model` format. Returns ------- bool """ itis = False if origin == "read": itis = isinstance(args[1], Model) and (format in (None, "astropy.model")) return itis # =================================================================== # Register convert_registry.register_reader("astropy.model", Cosmology, from_model) convert_registry.register_writer("astropy.model", Cosmology, to_model) convert_registry.register_identifier("astropy.model", Cosmology, model_identify)
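# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). It mirrors the
# examples already embedded in the ``to_model`` / ``from_model`` docstrings:
# a Cosmology redshift-method is wrapped as a fittable Model via the
# registered "astropy.model" writer, evaluated through the Model's __call__,
# and converted back with the registered reader. Variable names are arbitrary.

from astropy.cosmology import Cosmology, Planck18

# Wrap the ``lookback_time`` method of Planck18 as a Model; each non-None
# Cosmology Parameter (H0, Om0, ...) becomes a Model Parameter.
model = Planck18.to_format("astropy.model", method="lookback_time")

# __call__ / evaluate builds a cosmology from the current Parameter values
# and calls the wrapped method on the redshift input.
t = model(1100)  # lookback time at z = 1100, as a Quantity

# Round-trip back to a Cosmology instance via the registered reader.
cosmo = Cosmology.from_format(model)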
8629ccfb8c49edf549571fa5adb3ab395b26d03032352adbed25ca531ea825d0
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.core`.""" ############################################################################## # IMPORTS # STDLIB import abc import inspect import pickle # THIRD PARTY import numpy as np import pytest # LOCAL import astropy.cosmology.units as cu import astropy.units as u from astropy.cosmology import Cosmology, FlatCosmologyMixin from astropy.cosmology.core import _COSMOLOGY_CLASSES from astropy.cosmology.parameter import Parameter from astropy.table import Column, QTable, Table from astropy.utils.compat import PYTHON_LT_3_11 from astropy.utils.exceptions import AstropyDeprecationWarning from astropy.utils.metadata import MetaData from .test_connect import ReadWriteTestMixin, ToFromFormatTestMixin from .test_parameter import ParameterTestMixin ############################################################################## # SETUP / TEARDOWN scalar_zs = [ 0, 1, 1100, # interesting times # FIXME! np.inf breaks some funcs. 0 * inf is an error np.float64(3300), # different type 2 * cu.redshift, 3 * u.one, # compatible units ] _zarr = np.linspace(0, 1e5, num=20) array_zs = [ _zarr, # numpy _zarr.tolist(), # pure python Column(_zarr), # table-like _zarr * cu.redshift, # Quantity ] valid_zs = scalar_zs + array_zs invalid_zs = [ (None, TypeError), # wrong type # Wrong units (the TypeError is for the cython, which can differ) (4 * u.MeV, (u.UnitConversionError, TypeError)), # scalar ([0, 1] * u.m, (u.UnitConversionError, TypeError)), # array ] class SubCosmology(Cosmology): """Defined here to be serializable.""" H0 = Parameter(unit="km/(s Mpc)") Tcmb0 = Parameter(unit=u.K) m_nu = Parameter(unit=u.eV) def __init__(self, H0, Tcmb0=0 * u.K, m_nu=0 * u.eV, name=None, meta=None): super().__init__(name=name, meta=meta) self.H0 = H0 self.Tcmb0 = Tcmb0 self.m_nu = m_nu @property def is_flat(self): return super().is_flat() ############################################################################## # TESTS ############################################################################## class MetaTestMixin: """Tests for a :class:`astropy.utils.metadata.MetaData` on a Cosmology.""" def test_meta_on_class(self, cosmo_cls): assert isinstance(cosmo_cls.meta, MetaData) def test_meta_on_instance(self, cosmo): assert isinstance(cosmo.meta, dict) # test type # value set at initialization assert cosmo.meta == self.cls_kwargs.get("meta", {}) def test_meta_mutable(self, cosmo): """The metadata is NOT immutable on a cosmology""" key = tuple(cosmo.meta.keys())[0] # select some key cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable class CosmologyTest( ParameterTestMixin, MetaTestMixin, ReadWriteTestMixin, ToFromFormatTestMixin, metaclass=abc.ABCMeta, ): """ Test subclasses of :class:`astropy.cosmology.Cosmology`. """ @abc.abstractmethod def setup_class(self): """Setup for testing.""" def teardown_class(self): pass @property def cls_args(self): return tuple(self._cls_args.values()) @pytest.fixture(scope="class") def cosmo_cls(self): """The Cosmology class as a :func:`pytest.fixture`.""" return self.cls @pytest.fixture(scope="function") # ensure not cached. 
def ba(self): """Return filled `inspect.BoundArguments` for cosmology.""" ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs) ba.apply_defaults() return ba @pytest.fixture(scope="class") def cosmo(self, cosmo_cls): """The cosmology instance with which to test.""" ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs) ba.apply_defaults() return cosmo_cls(*ba.args, **ba.kwargs) # =============================================================== # Method & Attribute Tests # --------------------------------------------------------------- # class-level def test_init_subclass(self, cosmo_cls): """Test creating subclasses registers classes and manages Parameters.""" class InitSubclassTest(cosmo_cls): pass # test parameters assert InitSubclassTest.__parameters__ == cosmo_cls.__parameters__ # test and cleanup registry registrant = _COSMOLOGY_CLASSES.pop(InitSubclassTest.__qualname__) assert registrant is InitSubclassTest def test_init_signature(self, cosmo_cls, cosmo): """Test class-property ``_init_signature``.""" # test presence assert hasattr(cosmo_cls, "_init_signature") assert hasattr(cosmo, "_init_signature") # test internal consistency, so following tests can use either cls or instance. assert cosmo_cls._init_signature == cosmo._init_signature # test matches __init__, but without 'self' sig = inspect.signature(cosmo.__init__) # (instances don't have self) assert set(sig.parameters.keys()) == set( cosmo._init_signature.parameters.keys() ) assert all( np.all(sig.parameters[k].default == p.default) for k, p in cosmo._init_signature.parameters.items() ) # --------------------------------------------------------------- # instance-level def test_init(self, cosmo_cls): """Test initialization.""" # Cosmology only does name and meta, but this subclass adds H0 & Tcmb0. cosmo = cosmo_cls(*self.cls_args, name="test_init", meta={"m": 1}) assert cosmo.name == "test_init" assert cosmo.meta["m"] == 1 # if meta is None, it is changed to a dict cosmo = cosmo_cls(*self.cls_args, name="test_init", meta=None) assert cosmo.meta == {} def test_name(self, cosmo): """Test property ``name``.""" assert cosmo.name is cosmo._name # accesses private attribute assert cosmo.name is None or isinstance(cosmo.name, str) # type assert cosmo.name == self.cls_kwargs["name"] # test has expected value # immutable match = ( "can't set" if PYTHON_LT_3_11 else f"property 'name' of {cosmo.__class__.__name__!r} object has no setter" ) with pytest.raises(AttributeError, match=match): cosmo.name = None @abc.abstractmethod def test_is_flat(self, cosmo_cls, cosmo): """Test property ``is_flat``.""" # ------------------------------------------------ # clone def test_clone_identical(self, cosmo): """Test method ``.clone()`` if no (kw)args.""" assert cosmo.clone() is cosmo def test_clone_name(self, cosmo): """Test method ``.clone()`` name argument.""" # test changing name. 
clone treats 'name' differently (see next test) c = cosmo.clone(name="cloned cosmo") assert c.name == "cloned cosmo" # changed # show name is the only thing changed c._name = cosmo.name # first change name back assert c == cosmo assert c.meta == cosmo.meta # now change a different parameter and see how 'name' changes c = cosmo.clone(meta={"test_clone_name": True}) assert c.name == cosmo.name + " (modified)" def test_clone_meta(self, cosmo): """Test method ``.clone()`` meta argument: updates meta, doesn't clear.""" # start with no change c = cosmo.clone(meta=None) assert c.meta == cosmo.meta # add something c = cosmo.clone(meta=dict(test_clone_meta=True)) assert c.meta["test_clone_meta"] is True c.meta.pop("test_clone_meta") # remove from meta assert c.meta == cosmo.meta # now they match def test_clone_change_param(self, cosmo): """ Test method ``.clone()`` changing a(many) Parameter(s). Nothing here b/c no Parameters. """ def test_clone_fail_unexpected_arg(self, cosmo): """Test when ``.clone()`` gets an unexpected argument.""" with pytest.raises(TypeError, match="unexpected keyword argument"): cosmo.clone(not_an_arg=4) def test_clone_fail_positional_arg(self, cosmo): with pytest.raises(TypeError, match="1 positional argument"): cosmo.clone(None) # --------------------------------------------------------------- # comparison methods def test_is_equivalent(self, cosmo): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.""" # to self assert cosmo.is_equivalent(cosmo) # same class, different instance newclone = cosmo.clone(name="test_is_equivalent") assert cosmo.is_equivalent(newclone) assert newclone.is_equivalent(cosmo) # different class and not convertible to Cosmology. assert not cosmo.is_equivalent(2) def test_equality(self, cosmo): """Test method ``.__eq__().""" # wrong class assert (cosmo != 2) and (2 != cosmo) # correct assert cosmo == cosmo # different name <= not equal, but equivalent newcosmo = cosmo.clone(name="test_equality") assert (cosmo != newcosmo) and (newcosmo != cosmo) assert cosmo.__equiv__(newcosmo) and newcosmo.__equiv__(cosmo) # --------------------------------------------------------------- def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``. This is a very general test and it is probably good to have a hard-coded comparison. 
""" r = repr(cosmo) # class in string rep assert cosmo_cls.__qualname__ in r assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing r = r[len(cosmo_cls.__qualname__) + 1 :] # remove # name in string rep if cosmo.name is not None: assert f'name="{cosmo.name}"' in r assert r.index("name=") == 0 r = r[6 + len(cosmo.name) + 3 :] # remove # parameters in string rep ps = {k: getattr(cosmo, k) for k in cosmo.__parameters__} for k, v in ps.items(): sv = f"{k}={v}" assert sv in r assert r.index(k) == 0 r = r[len(sv) + 2 :] # remove # ------------------------------------------------ @pytest.mark.parametrize("in_meta", [True, False]) @pytest.mark.parametrize("table_cls", [Table, QTable]) def test_astropy_table(self, cosmo, table_cls, in_meta): """Test ``astropy.table.Table(cosmology)``.""" tbl = table_cls(cosmo, cosmology_in_meta=in_meta) assert isinstance(tbl, table_cls) # the name & all parameters are columns for n in ("name", *cosmo.__parameters__): assert n in tbl.colnames assert np.all(tbl[n] == getattr(cosmo, n)) # check if Cosmology is in metadata or a column if in_meta: assert tbl.meta["cosmology"] == cosmo.__class__.__qualname__ assert "cosmology" not in tbl.colnames else: assert "cosmology" not in tbl.meta assert tbl["cosmology"][0] == cosmo.__class__.__qualname__ # the metadata is transferred for k, v in cosmo.meta.items(): assert np.all(tbl.meta[k] == v) # =============================================================== # Usage Tests def test_immutability(self, cosmo): """ Test immutability of cosmologies. The metadata is mutable: see ``test_meta_mutable``. """ for n in cosmo.__all_parameters__: with pytest.raises(AttributeError): setattr(cosmo, n, getattr(cosmo, n)) def test_pickle_class(self, cosmo_cls, pickle_protocol): """Test classes can pickle and unpickle.""" # pickle and unpickle f = pickle.dumps(cosmo_cls, protocol=pickle_protocol) unpickled = pickle.loads(f) # test equality assert unpickled == cosmo_cls def test_pickle_instance(self, cosmo, pickle_protocol): """Test instances can pickle and unpickle.""" # pickle and unpickle f = pickle.dumps(cosmo, protocol=pickle_protocol) with u.add_enabled_units(cu): unpickled = pickle.loads(f) assert unpickled == cosmo assert unpickled.meta == cosmo.meta class TestCosmology(CosmologyTest): """Test :class:`astropy.cosmology.Cosmology`. Subclasses should define tests for: - ``test_clone_change_param()`` - ``test_repr()`` """ def setup_class(self): """ Setup for testing. Cosmology should not be instantiated, so tests are done on a subclass. """ # make sure SubCosmology is known _COSMOLOGY_CLASSES["SubCosmology"] = SubCosmology self.cls = SubCosmology self._cls_args = dict( H0=70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, m_nu=0.6 * u.eV ) self.cls_kwargs = dict(name=self.__class__.__name__, meta={"a": "b"}) def teardown_class(self): """Teardown for testing.""" super().teardown_class(self) _COSMOLOGY_CLASSES.pop("SubCosmology", None) # =============================================================== # Method & Attribute Tests def test_is_flat(self, cosmo_cls, cosmo): """Test property ``is_flat``. It's an ABC.""" with pytest.raises(NotImplementedError, match="is_flat is not implemented"): cosmo.is_flat # ----------------------------------------------------------------------------- class FlatCosmologyMixinTest: """Tests for :class:`astropy.cosmology.core.FlatCosmologyMixin` subclasses. The test suite structure mirrors the implementation of the tested code. 
Just like :class:`astropy.cosmology.FlatCosmologyMixin` is an abstract base class (ABC) that cannot be used by itself, so too is this corresponding test class an ABC mixin. E.g to use this class:: class TestFlatSomeCosmology(FlatCosmologyMixinTest, TestSomeCosmology): ... """ def test_nonflat_class_(self, cosmo_cls, cosmo): """Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat_cls`.""" # Test it's a method on the class assert issubclass(cosmo_cls, cosmo_cls.__nonflatclass__) # It also works from the instance. # TODO! as a "metaclassmethod" assert issubclass(cosmo_cls, cosmo.__nonflatclass__) # Maybe not the most robust test, but so far all Flat classes have the # name of their parent class. assert cosmo.__nonflatclass__.__name__ in cosmo_cls.__name__ def test_is_flat(self, cosmo_cls, cosmo): """Test property ``is_flat``.""" super().test_is_flat(cosmo_cls, cosmo) # it's always True assert cosmo.is_flat is True def test_nonflat(self, cosmo): """Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat`.""" assert cosmo.nonflat.is_equivalent(cosmo) assert cosmo.is_equivalent(cosmo.nonflat) # ------------------------------------------------ # clone def test_clone_to_nonflat_equivalent(self, cosmo): """Test method ``.clone()``to_nonflat argument.""" # just converting the class nc = cosmo.clone(to_nonflat=True) assert isinstance(nc, cosmo.__nonflatclass__) assert nc == cosmo.nonflat @abc.abstractmethod def test_clone_to_nonflat_change_param(self, cosmo): """ Test method ``.clone()`` changing a(many) Parameter(s). No parameters are changed here because FlatCosmologyMixin has no Parameters. See class docstring for why this test method exists. """ # send to non-flat nc = cosmo.clone(to_nonflat=True) assert isinstance(nc, cosmo.__nonflatclass__) assert nc == cosmo.nonflat # ------------------------------------------------ def test_is_equivalent(self, cosmo): """Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.is_equivalent`. Normally this would pass up via super(), but ``__equiv__`` is meant to be overridden, so we skip super(). e.g. FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestCosmology vs FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestFLRW -> TestCosmology """ CosmologyTest.test_is_equivalent(self, cosmo) # See FlatFLRWMixinTest for tests. It's a bit hard here since this class # is for an ABC. # =============================================================== # Usage Tests def test_subclassing(self, cosmo_cls): """Test when subclassing a flat cosmology.""" class SubClass1(cosmo_cls): pass # The classes have the same non-flat parent class assert SubClass1.__nonflatclass__ is cosmo_cls.__nonflatclass__ # A more complex example is when Mixin classes are used. class Mixin: pass class SubClass2(Mixin, cosmo_cls): pass # The classes have the same non-flat parent class assert SubClass2.__nonflatclass__ is cosmo_cls.__nonflatclass__ # The order of the Mixin should not matter class SubClass3(cosmo_cls, Mixin): pass # The classes have the same non-flat parent class assert SubClass3.__nonflatclass__ is cosmo_cls.__nonflatclass__ def test__nonflatclass__multiple_nonflat_inheritance(): """ Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.__nonflatclass__` when there's more than one non-flat class in the inheritance. """ # Define a non-operable minimal subclass of Cosmology. 
class SubCosmology2(Cosmology): def __init__(self, H0, Tcmb0=0 * u.K, m_nu=0 * u.eV, name=None, meta=None): super().__init__(name=name, meta=meta) @property def is_flat(self): return False # Now make an ambiguous flat cosmology from the two SubCosmologies with pytest.raises(TypeError, match="cannot create a consistent non-flat class"): class FlatSubCosmology(FlatCosmologyMixin, SubCosmology, SubCosmology2): @property def nonflat(self): pass # ----------------------------------------------------------------------------- def test_flrw_moved_deprecation(): """Test the deprecation warning about the move of FLRW classes.""" from astropy.cosmology import flrw # it's deprecated to import `flrw/*` from `core.py` with pytest.warns(AstropyDeprecationWarning): from astropy.cosmology.core import FLRW # but they are the same object assert FLRW is flrw.FLRW
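# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test module above) of the clone /
# equality behaviour exercised by ``test_clone_identical``, ``test_clone_name``
# and ``test_equality``. ``Planck18`` is used only as a convenient built-in
# instance; the asserts restate what the tests check.

from astropy.cosmology import Planck18

c1 = Planck18.clone()                       # no (kw)args -> the same object
assert c1 is Planck18

c2 = Planck18.clone(meta={"note": "demo"})  # changed, but no new name given
assert c2.name == "Planck18 (modified)"     # name records the modification

c3 = Planck18.clone(name="renamed")
assert c3 != Planck18                       # different name -> not __eq__ ...
assert c3.is_equivalent(Planck18)           # ... but still equivalent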
11883df81326dba53dd4c1c982a3b20418a7e198c34947d24e55a3980f4cd2b8
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Astropy FLRW classes.""" from . import base, lambdacdm, w0cdm, w0wacdm, w0wzcdm, wpwazpcdm from .base import * from .lambdacdm import * from .w0cdm import * from .w0wacdm import * from .w0wzcdm import * from .wpwazpcdm import * __all__ = ( base.__all__ + lambdacdm.__all__ + w0cdm.__all__ + w0wacdm.__all__ + wpwazpcdm.__all__ + w0wzcdm.__all__ ) def __getattr__(attr): """Lazy import deprecated private API.""" base_attrs = ( "H0units_to_invs", "a_B_c2", "critdens_const", "kB_evK", "radian_in_arcmin", "radian_in_arcsec", "sec_to_Gyr", ) if attr in base_attrs + ("quad",) + ("ellipkinc", "hyp2f1"): import warnings from astropy.utils.exceptions import AstropyDeprecationWarning from . import base, lambdacdm msg = ( f"`astropy.cosmology.flrw.{attr}` is a private variable (since " "v5.1) and in future will raise an exception." ) warnings.warn(msg, AstropyDeprecationWarning) if attr in base_attrs: return getattr(base, "_" + attr) elif attr == "quad": return getattr(base, attr) elif attr in ("ellipkinc", "hyp2f1"): return getattr(lambdacdm, attr) raise AttributeError(f"module {__name__!r} has no attribute {attr!r}.")
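# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the package above) of the module-level
# ``__getattr__`` deprecation shim: the old private names keep resolving, but
# each access emits an AstropyDeprecationWarning before the underscored
# attribute from ``base``/``lambdacdm`` is returned. The warning-capture
# pattern below is a standard-library approach, not code from this package.

import warnings

from astropy.utils.exceptions import AstropyDeprecationWarning

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from astropy.cosmology import flrw
    value = flrw.sec_to_Gyr  # deprecated alias routed to flrw.base._sec_to_Gyr

assert any(issubclass(w.category, AstropyDeprecationWarning) for w in caught)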
bdaa1af1758ecd572bdf98b9bfb7913418fee80172d0f801617156425549651f
# Licensed under a 3-clause BSD style license - see LICENSE.rst from __future__ import annotations import warnings from abc import abstractmethod from math import exp, floor, log, pi, sqrt from numbers import Number from typing import Any, Mapping, TypeVar import numpy as np from numpy import inf, sin import astropy.constants as const import astropy.units as u from astropy.cosmology.core import Cosmology, FlatCosmologyMixin from astropy.cosmology.parameter import ( Parameter, _validate_non_negative, _validate_with_unit, ) from astropy.cosmology.utils import aszarr, vectorize_redshift_method from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.decorators import lazyproperty from astropy.utils.exceptions import AstropyUserWarning # isort: split if HAS_SCIPY: from scipy.integrate import quad else: def quad(*args, **kwargs): raise ModuleNotFoundError("No module named 'scipy.integrate'") __all__ = ["FLRW", "FlatFLRWMixin"] __doctest_requires__ = {"*": ["scipy"]} ############################################################################## # Parameters # Some conversion constants -- useful to compute them once here and reuse in # the initialization rather than have every object do them. _H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s) _sec_to_Gyr = u.s.to(u.Gyr) # const in critical density in cgs units (g cm^-3) _critdens_const = (3 / (8 * pi * const.G)).cgs.value # angle conversions _radian_in_arcsec = (1 * u.rad).to(u.arcsec) _radian_in_arcmin = (1 * u.rad).to(u.arcmin) # Radiation parameter over c^2 in cgs (g cm^-3 K^-4) _a_B_c2 = (4 * const.sigma_sb / const.c**3).cgs.value # Boltzmann constant in eV / K _kB_evK = const.k_B.to(u.eV / u.K) # typing _FLRWT = TypeVar("_FLRWT", bound="FLRW") _FlatFLRWMixinT = TypeVar("_FlatFLRWMixinT", bound="FlatFLRWMixin") ############################################################################## class FLRW(Cosmology): """ A class describing an isotropic and homogeneous (Friedmann-Lemaitre-Robertson-Walker) cosmology. This is an abstract base class -- you cannot instantiate examples of this class, but must work with one of its subclasses, such as :class:`~astropy.cosmology.LambdaCDM` or :class:`~astropy.cosmology.wCDM`. Parameters ---------- H0 : float or scalar quantity-like ['frequency'] Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]. Om0 : float Omega matter: density of non-relativistic matter in units of the critical density at z=0. Note that this does not include massive neutrinos. Ode0 : float Omega dark energy: density of dark energy in units of the critical density at z=0. Tcmb0 : float or scalar quantity-like ['temperature'], optional Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K]. Setting this to zero will turn off both photons and neutrinos (even massive ones). Neff : float, optional Effective number of Neutrino species. Default 3.04. m_nu : quantity-like ['energy', 'mass'] or array-like, optional Mass of each neutrino species in [eV] (mass-energy equivalency enabled). If this is a scalar Quantity, then all neutrino species are assumed to have that mass. Otherwise, the mass of each species. The actual number of neutrino species (and hence the number of elements of m_nu if it is not scalar) must be the floor of Neff. Typically this means you should provide three neutrino masses unless you are considering something like a sterile neutrino. Ob0 : float or None, optional Omega baryons: density of baryonic matter in units of the critical density at z=0. 
If this is set to None (the default), any computation that requires its value will raise an exception. name : str or None (optional, keyword-only) Name for this cosmological object. meta : mapping or None (optional, keyword-only) Metadata for the cosmology, e.g., a reference. Notes ----- Class instances are immutable -- you cannot change the parameters' values. That is, all of the above attributes (except meta) are read only. For details on how to create performant custom subclasses, see the documentation on :ref:`astropy-cosmology-fast-integrals`. """ H0 = Parameter( doc="Hubble constant as an `~astropy.units.Quantity` at z=0.", unit="km/(s Mpc)", fvalidate="scalar", ) Om0 = Parameter( doc="Omega matter; matter density/critical density at z=0.", fvalidate="non-negative", ) Ode0 = Parameter( doc="Omega dark energy; dark energy density/critical density at z=0.", fvalidate="float", ) Tcmb0 = Parameter( doc="Temperature of the CMB as `~astropy.units.Quantity` at z=0.", unit="Kelvin", fvalidate="scalar", ) Neff = Parameter( doc="Number of effective neutrino species.", fvalidate="non-negative" ) m_nu = Parameter( doc="Mass of neutrino species.", unit="eV", equivalencies=u.mass_energy() ) Ob0 = Parameter( doc="Omega baryon; baryonic matter density/critical density at z=0." ) def __init__( self, H0, Om0, Ode0, Tcmb0=0.0 * u.K, Neff=3.04, m_nu=0.0 * u.eV, Ob0=None, *, name=None, meta=None, ): super().__init__(name=name, meta=meta) # Assign (and validate) Parameters self.H0 = H0 self.Om0 = Om0 self.Ode0 = Ode0 self.Tcmb0 = Tcmb0 self.Neff = Neff self.m_nu = m_nu # (reset later, this is just for unit validation) self.Ob0 = Ob0 # (must be after Om0) # Derived quantities: # Dark matter density; matter - baryons, if latter is not None. self._Odm0 = None if Ob0 is None else (self._Om0 - self._Ob0) # 100 km/s/Mpc * h = H0 (so h is dimensionless) self._h = self._H0.value / 100.0 # Hubble distance self._hubble_distance = (const.c / self._H0).to(u.Mpc) # H0 in s^-1 H0_s = self._H0.value * _H0units_to_invs # Hubble time self._hubble_time = (_sec_to_Gyr / H0_s) << u.Gyr # Critical density at z=0 (grams per cubic cm) cd0value = _critdens_const * H0_s**2 self._critical_density0 = cd0value << u.g / u.cm**3 # Compute photon density from Tcmb self._Ogamma0 = _a_B_c2 * self._Tcmb0.value**4 / self._critical_density0.value # Compute Neutrino temperature: # The constant in front is (4/11)^1/3 -- see any cosmology book for an # explanation -- for example, Weinberg 'Cosmology' p 154 eq (3.1.21). self._Tnu0 = 0.7137658555036082 * self._Tcmb0 # Compute neutrino parameters: if self._m_nu is None: self._nneutrinos = 0 self._neff_per_nu = None self._massivenu = False self._massivenu_mass = None self._nmassivenu = self._nmasslessnu = None else: self._nneutrinos = floor(self._Neff) # We are going to share Neff between the neutrinos equally. In # detail this is not correct, but it is a standard assumption # because properly calculating it is a) complicated b) depends on # the details of the massive neutrinos (e.g., their weak # interactions, which could be unusual if one is considering # sterile neutrinos). self._neff_per_nu = self._Neff / self._nneutrinos # Now figure out if we have massive neutrinos to deal with, and if # so, get the right number of masses. It is worth keeping track of # massless ones separately (since they are easy to deal with, and a # common use case is to have only one massive neutrino). 
massive = np.nonzero(self._m_nu.value > 0)[0] self._massivenu = massive.size > 0 self._nmassivenu = len(massive) self._massivenu_mass = ( self._m_nu[massive].value if self._massivenu else None ) self._nmasslessnu = self._nneutrinos - self._nmassivenu # Compute Neutrino Omega and total relativistic component for massive # neutrinos. We also store a list version, since that is more efficient # to do integrals with (perhaps surprisingly! But small python lists # are more efficient than small NumPy arrays). if self._massivenu: # (`_massivenu` set in `m_nu`) nu_y = self._massivenu_mass / (_kB_evK * self._Tnu0) self._nu_y = nu_y.value self._nu_y_list = self._nu_y.tolist() self._Onu0 = self._Ogamma0 * self.nu_relative_density(0) else: # This case is particularly simple, so do it directly The 0.2271... # is 7/8 (4/11)^(4/3) -- the temperature bit ^4 (blackbody energy # density) times 7/8 for FD vs. BE statistics. self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0 self._nu_y = self._nu_y_list = None # Compute curvature density self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0 # Subclasses should override this reference if they provide # more efficient scalar versions of inv_efunc. self._inv_efunc_scalar = self.inv_efunc self._inv_efunc_scalar_args = () # --------------------------------------------------------------- # Parameter details @Ob0.validator def Ob0(self, param, value): """Validate baryon density to None or positive float > matter density.""" if value is None: return value value = _validate_non_negative(self, param, value) if value > self.Om0: raise ValueError( "baryonic density can not be larger than total matter density." ) return value @m_nu.validator def m_nu(self, param, value): """Validate neutrino masses to right value, units, and shape. There are no neutrinos if floor(Neff) or Tcmb0 are 0. The number of neutrinos must match floor(Neff). Neutrino masses cannot be negative. """ # Check if there are any neutrinos if (nneutrinos := floor(self._Neff)) == 0 or self._Tcmb0.value == 0: return None # None, regardless of input # Validate / set units value = _validate_with_unit(self, param, value) # Check values and data shapes if value.shape not in ((), (nneutrinos,)): raise ValueError( "unexpected number of neutrino masses — " f"expected {nneutrinos}, got {len(value)}." ) elif np.any(value.value < 0): raise ValueError("invalid (negative) neutrino mass encountered.") # scalar -> array if value.isscalar: value = np.full_like(value, value, shape=nneutrinos) return value # --------------------------------------------------------------- # properties @property def is_flat(self): """Return bool; `True` if the cosmology is flat.""" return bool((self._Ok0 == 0.0) and (self.Otot0 == 1.0)) @property def Otot0(self): """Omega total; the total density/critical density at z=0.""" return self._Om0 + self._Ogamma0 + self._Onu0 + self._Ode0 + self._Ok0 @property def Odm0(self): """Omega dark matter; dark matter density/critical density at z=0.""" return self._Odm0 @property def Ok0(self): """Omega curvature; the effective curvature density/critical density at z=0.""" return self._Ok0 @property def Tnu0(self): """ Temperature of the neutrino background as `~astropy.units.Quantity` at z=0. 
""" return self._Tnu0 @property def has_massive_nu(self): """Does this cosmology have at least one massive neutrino species?""" if self._Tnu0.value == 0: return False return self._massivenu @property def h(self): """Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc].""" return self._h @property def hubble_time(self): """Hubble time as `~astropy.units.Quantity`.""" return self._hubble_time @property def hubble_distance(self): """Hubble distance as `~astropy.units.Quantity`.""" return self._hubble_distance @property def critical_density0(self): """Critical density as `~astropy.units.Quantity` at z=0.""" return self._critical_density0 @property def Ogamma0(self): """Omega gamma; the density/critical density of photons at z=0.""" return self._Ogamma0 @property def Onu0(self): """Omega nu; the density/critical density of neutrinos at z=0.""" return self._Onu0 # --------------------------------------------------------------- @abstractmethod def w(self, z): r"""The dark energy equation of state. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- w : ndarray or float The dark energy equation of state. `float` if scalar input. Notes ----- The dark energy equation of state is defined as :math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at redshift z and :math:`\rho(z)` is the density at redshift z, both in units where c=1. This must be overridden by subclasses. """ raise NotImplementedError("w(z) is not implemented") def Otot(self, z): """The total density parameter at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Returns ------- Otot : ndarray or float The total density relative to the critical density at each redshift. Returns float if input scalar. """ return self.Om(z) + self.Ogamma(z) + self.Onu(z) + self.Ode(z) + self.Ok(z) def Om(self, z): """ Return the density parameter for non-relativistic matter at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Om : ndarray or float The density of non-relativistic matter relative to the critical density at each redshift. Returns `float` if the input is scalar. Notes ----- This does not include neutrinos, even if non-relativistic at the redshift of interest; see `Onu`. """ z = aszarr(z) return self._Om0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2 def Ob(self, z): """Return the density parameter for baryonic matter at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Ob : ndarray or float The density of baryonic matter relative to the critical density at each redshift. Returns `float` if the input is scalar. Raises ------ ValueError If ``Ob0`` is `None`. """ if self._Ob0 is None: raise ValueError("Baryon density not set for this cosmology") z = aszarr(z) return self._Ob0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2 def Odm(self, z): """Return the density parameter for dark matter at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Odm : ndarray or float The density of non-relativistic dark matter relative to the critical density at each redshift. Returns `float` if the input is scalar. Raises ------ ValueError If ``Ob0`` is `None`. Notes ----- This does not include neutrinos, even if non-relativistic at the redshift of interest. 
""" if self._Odm0 is None: raise ValueError( "Baryonic density not set for this cosmology, " "unclear meaning of dark matter density" ) z = aszarr(z) return self._Odm0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2 def Ok(self, z): """ Return the equivalent density parameter for curvature at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Ok : ndarray or float The equivalent density parameter for curvature at each redshift. Returns `float` if the input is scalar. """ z = aszarr(z) if self._Ok0 == 0: # Common enough to be worth checking explicitly return np.zeros(z.shape) if hasattr(z, "shape") else 0.0 return self._Ok0 * (z + 1.0) ** 2 * self.inv_efunc(z) ** 2 def Ode(self, z): """Return the density parameter for dark energy at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Ode : ndarray or float The density of non-relativistic matter relative to the critical density at each redshift. Returns `float` if the input is scalar. """ z = aszarr(z) if self._Ode0 == 0: # Common enough to be worth checking explicitly return np.zeros(z.shape) if hasattr(z, "shape") else 0.0 return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2 def Ogamma(self, z): """Return the density parameter for photons at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Ogamma : ndarray or float The energy density of photons relative to the critical density at each redshift. Returns `float` if the input is scalar. """ z = aszarr(z) return self._Ogamma0 * (z + 1.0) ** 4 * self.inv_efunc(z) ** 2 def Onu(self, z): r"""Return the density parameter for neutrinos at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Onu : ndarray or float The energy density of neutrinos relative to the critical density at each redshift. Note that this includes their kinetic energy (if they have mass), so it is not equal to the commonly used :math:`\sum \frac{m_{\nu}}{94 eV}`, which does not include kinetic energy. Returns `float` if the input is scalar. """ z = aszarr(z) if self._Onu0 == 0: # Common enough to be worth checking explicitly return np.zeros(z.shape) if hasattr(z, "shape") else 0.0 return self.Ogamma(z) * self.nu_relative_density(z) def Tcmb(self, z): """Return the CMB temperature at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Tcmb : `~astropy.units.Quantity` ['temperature'] The temperature of the CMB in K. """ return self._Tcmb0 * (aszarr(z) + 1.0) def Tnu(self, z): """Return the neutrino temperature at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- Tnu : `~astropy.units.Quantity` ['temperature'] The temperature of the cosmic neutrino background in K. """ return self._Tnu0 * (aszarr(z) + 1.0) def nu_relative_density(self, z): r"""Neutrino density function relative to the energy density in photons. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- f : ndarray or float The neutrino density scaling factor relative to the density in photons at each redshift. Only returns `float` if z is scalar. Notes ----- The density in neutrinos is given by .. 
math:: \rho_{\nu} \left(a\right) = 0.2271 \, N_{eff} \, f\left(m_{\nu} a / T_{\nu 0} \right) \, \rho_{\gamma} \left( a \right) where .. math:: f \left(y\right) = \frac{120}{7 \pi^4} \int_0^{\infty} \, dx \frac{x^2 \sqrt{x^2 + y^2}} {e^x + 1} assuming that all neutrino species have the same mass. If they have different masses, a similar term is calculated for each one. Note that ``f`` has the asymptotic behavior :math:`f(0) = 1`. This method returns :math:`0.2271 f` using an analytical fitting formula given in Komatsu et al. 2011, ApJS 192, 18. """ # Note that there is also a scalar-z-only cython implementation of # this in scalar_inv_efuncs.pyx, so if you find a problem in this # you need to update there too. # See Komatsu et al. 2011, eq 26 and the surrounding discussion # for an explanation of what we are doing here. # However, this is modified to handle multiple neutrino masses # by computing the above for each mass, then summing prefac = 0.22710731766 # 7/8 (4/11)^4/3 -- see any cosmo book # The massive and massless contribution must be handled separately # But check for common cases first z = aszarr(z) if not self._massivenu: return ( prefac * self._Neff * (np.ones(z.shape) if hasattr(z, "shape") else 1.0) ) # These are purely fitting constants -- see the Komatsu paper p = 1.83 invp = 0.54644808743 # 1.0 / p k = 0.3173 curr_nu_y = self._nu_y / (1.0 + np.expand_dims(z, axis=-1)) rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu return prefac * self._neff_per_nu * rel_mass def _w_integrand(self, ln1pz): """Internal convenience function for w(z) integral (eq. 5 of [1]_). Parameters ---------- ln1pz : `~numbers.Number` or scalar ndarray Assumes scalar input, since this should only be called inside an integral. References ---------- .. [1] Linder, E. (2003). Exploring the Expansion History of the Universe. Phys. Rev. Lett., 90, 091301. """ return 1.0 + self.w(exp(ln1pz) - 1.0) def de_density_scale(self, z): r"""Evaluates the redshift dependence of the dark energy density. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- I : ndarray or float The scaling of the energy density of dark energy with redshift. Returns `float` if the input is scalar. Notes ----- The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`, and is given by .. math:: I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} } \left[ 1 + w\left( a^{\prime} \right) \right] \right) The actual integral used is rewritten from [1]_ to be in terms of z. It will generally helpful for subclasses to overload this method if the integral can be done analytically for the particular dark energy equation of state that they implement. References ---------- .. [1] Linder, E. (2003). Exploring the Expansion History of the Universe. Phys. Rev. Lett., 90, 091301. """ # This allows for an arbitrary w(z) following eq (5) of # Linder 2003, PRL 90, 91301. The code here evaluates # the integral numerically. However, most popular # forms of w(z) are designed to make this integral analytic, # so it is probably a good idea for subclasses to overload this # method if an analytic form is available. 
z = aszarr(z) if not isinstance(z, (Number, np.generic)): # array/Quantity ival = np.array( [quad(self._w_integrand, 0, log(1 + redshift))[0] for redshift in z] ) return np.exp(3 * ival) else: # scalar ival = quad(self._w_integrand, 0, log(z + 1.0))[0] return exp(3 * ival) def efunc(self, z): """Function used to calculate H(z), the Hubble parameter. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- E : ndarray or float The redshift scaling of the Hubble constant. Returns `float` if the input is scalar. Defined such that :math:`H(z) = H_0 E(z)`. Notes ----- It is not necessary to override this method, but if de_density_scale takes a particularly simple form, it may be advantageous to. """ Or = self._Ogamma0 + ( self._Onu0 if not self._massivenu else self._Ogamma0 * self.nu_relative_density(z) ) zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless]) return np.sqrt( zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0 * self.de_density_scale(z) ) def inv_efunc(self, z): """Inverse of ``efunc``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- E : ndarray or float The redshift scaling of the inverse Hubble constant. Returns `float` if the input is scalar. """ # Avoid the function overhead by repeating code Or = self._Ogamma0 + ( self._Onu0 if not self._massivenu else self._Ogamma0 * self.nu_relative_density(z) ) zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless]) return ( zp1**2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) + self._Ode0 * self.de_density_scale(z) ) ** (-0.5) def _lookback_time_integrand_scalar(self, z): """Integrand of the lookback time (equation 30 of [1]_). Parameters ---------- z : float Input redshift. Returns ------- I : float The integrand for the lookback time. References ---------- .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11. arXiv e-prints, astro-ph/9905116. """ return self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args) / (z + 1.0) def lookback_time_integrand(self, z): """Integrand of the lookback time (equation 30 of [1]_). Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- I : float or array The integrand for the lookback time. References ---------- .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11. arXiv e-prints, astro-ph/9905116. """ z = aszarr(z) return self.inv_efunc(z) / (z + 1.0) def _abs_distance_integrand_scalar(self, z): """Integrand of the absorption distance [1]_. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- X : float The integrand for the absorption distance. References ---------- .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11. arXiv e-prints, astro-ph/9905116. """ args = self._inv_efunc_scalar_args return (z + 1.0) ** 2 * self._inv_efunc_scalar(z, *args) def abs_distance_integrand(self, z): """Integrand of the absorption distance [1]_. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- X : float or array The integrand for the absorption distance. References ---------- .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11. arXiv e-prints, astro-ph/9905116. """ z = aszarr(z) return (z + 1.0) ** 2 * self.inv_efunc(z) def H(self, z): """Hubble parameter (km/s/Mpc) at redshift ``z``. 
Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- H : `~astropy.units.Quantity` ['frequency'] Hubble parameter at each input redshift. """ return self._H0 * self.efunc(z) def scale_factor(self, z): """Scale factor at redshift ``z``. The scale factor is defined as :math:`a = 1 / (1 + z)`. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- a : ndarray or float Scale factor at each input redshift. Returns `float` if the input is scalar. """ return 1.0 / (aszarr(z) + 1.0) def lookback_time(self, z): """Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : `~astropy.units.Quantity` ['time'] Lookback time in Gyr to each input redshift. See Also -------- z_at_value : Find the redshift corresponding to a lookback time. """ return self._lookback_time(z) def _lookback_time(self, z): """Lookback time in Gyr to redshift ``z``. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : `~astropy.units.Quantity` ['time'] Lookback time in Gyr to each input redshift. """ return self._hubble_time * self._integral_lookback_time(z) @vectorize_redshift_method def _integral_lookback_time(self, z, /): """Lookback time to redshift ``z``. Value in units of Hubble time. The lookback time is the difference between the age of the Universe now and the age at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : float or ndarray Lookback time to each input redshift in Hubble time units. Returns `float` if input scalar, `~numpy.ndarray` otherwise. """ return quad(self._lookback_time_integrand_scalar, 0, z)[0] def lookback_distance(self, z): """ The lookback distance is the light travel time distance to a given redshift. It is simply c * lookback_time. It may be used to calculate the proper distance between two redshifts, e.g. for the mean free path to ionizing radiation. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- d : `~astropy.units.Quantity` ['length'] Lookback distance in Mpc """ return (self.lookback_time(z) * const.c).to(u.Mpc) def age(self, z): """Age of the universe in Gyr at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : `~astropy.units.Quantity` ['time'] The age of the universe in Gyr at each input redshift. See Also -------- z_at_value : Find the redshift corresponding to an age. """ return self._age(z) def _age(self, z): """Age of the universe in Gyr at redshift ``z``. This internal function exists to be re-defined for optimizations. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : `~astropy.units.Quantity` ['time'] The age of the universe in Gyr at each input redshift. """ return self._hubble_time * self._integral_age(z) @vectorize_redshift_method def _integral_age(self, z, /): """Age of the universe at redshift ``z``. Value in units of Hubble time. 
Calculated using explicit integration. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- t : float or ndarray The age of the universe at each input redshift in Hubble time units. Returns `float` if input scalar, `~numpy.ndarray` otherwise. See Also -------- z_at_value : Find the redshift corresponding to an age. """ return quad(self._lookback_time_integrand_scalar, z, inf)[0] def critical_density(self, z): """Critical density in grams per cubic cm at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- rho : `~astropy.units.Quantity` Critical density in g/cm^3 at each input redshift. """ return self._critical_density0 * (self.efunc(z)) ** 2 def comoving_distance(self, z): """Comoving line-of-sight distance in Mpc at a given redshift. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving distance in Mpc to each input redshift. """ return self._comoving_distance_z1z2(0, z) def _comoving_distance_z1z2(self, z1, z2): """ Comoving line-of-sight distance in Mpc between objects at redshifts ``z1`` and ``z2``. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving distance in Mpc between each input redshift. """ return self._integral_comoving_distance_z1z2(z1, z2) @vectorize_redshift_method(nin=2) def _integral_comoving_distance_z1z2_scalar(self, z1, z2, /): """ Comoving line-of-sight distance between objects at redshifts ``z1`` and ``z2``. Value in Mpc. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Returns ------- d : float or ndarray Comoving distance in Mpc between each input redshift. Returns `float` if input scalar, `~numpy.ndarray` otherwise. """ return quad(self._inv_efunc_scalar, z1, z2, args=self._inv_efunc_scalar_args)[0] def _integral_comoving_distance_z1z2(self, z1, z2): """ Comoving line-of-sight distance in Mpc between objects at redshifts ``z1`` and ``z2``. The comoving distance along the line-of-sight between two objects remains constant with time for objects in the Hubble flow. Parameters ---------- z1, z2 : Quantity-like ['redshift'] or array-like Input redshifts. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving distance in Mpc between each input redshift. """ return self._hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2) # fmt: skip def comoving_transverse_distance(self, z): r"""Comoving transverse distance in Mpc at a given redshift. This value is the transverse comoving distance at redshift ``z`` corresponding to an angular separation of 1 radian. This is the same as the comoving distance if :math:`\Omega_k` is zero (as in the current concordance Lambda-CDM model). Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. 
Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving transverse distance in Mpc at each input redshift. Notes ----- This quantity is also called the 'proper motion distance' in some texts. """ return self._comoving_transverse_distance_z1z2(0, z) def _comoving_transverse_distance_z1z2(self, z1, z2): r"""Comoving transverse distance in Mpc between two redshifts. This value is the transverse comoving distance at redshift ``z2`` as seen from redshift ``z1`` corresponding to an angular separation of 1 radian. This is the same as the comoving distance if :math:`\Omega_k` is zero (as in the current concordance Lambda-CDM model). Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Returns ------- d : `~astropy.units.Quantity` ['length'] Comoving transverse distance in Mpc between input redshift. Notes ----- This quantity is also called the 'proper motion distance' in some texts. """ Ok0 = self._Ok0 dc = self._comoving_distance_z1z2(z1, z2) if Ok0 == 0: return dc sqrtOk0 = sqrt(abs(Ok0)) dh = self._hubble_distance if Ok0 > 0: return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value) else: return dh / sqrtOk0 * sin(sqrtOk0 * dc.value / dh.value) def angular_diameter_distance(self, z): """Angular diameter distance in Mpc at a given redshift. This gives the proper (sometimes called 'physical') transverse distance corresponding to an angle of 1 radian for an object at redshift ``z`` ([1]_, [2]_, [3]_). Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- d : `~astropy.units.Quantity` ['length'] Angular diameter distance in Mpc at each input redshift. References ---------- .. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 421-424. .. [2] Weedman, D. (1986). Quasar astronomy, pp 65-67. .. [3] Peebles, P. (1993). Principles of Physical Cosmology, pp 325-327. """ z = aszarr(z) return self.comoving_transverse_distance(z) / (z + 1.0) def luminosity_distance(self, z): """Luminosity distance in Mpc at redshift ``z``. This is the distance to use when converting between the bolometric flux from an object at redshift ``z`` and its bolometric luminosity [1]_. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- d : `~astropy.units.Quantity` ['length'] Luminosity distance in Mpc at each input redshift. See Also -------- z_at_value : Find the redshift corresponding to a luminosity distance. References ---------- .. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62. """ z = aszarr(z) return (z + 1.0) * self.comoving_transverse_distance(z) def angular_diameter_distance_z1z2(self, z1, z2): """Angular diameter distance between objects at 2 redshifts. Useful for gravitational lensing, for example computing the angular diameter distance between a lensed galaxy and the foreground lens. Parameters ---------- z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. For most practical applications such as gravitational lensing, ``z2`` should be larger than ``z1``. The method will work for ``z2 < z1``; however, this will return negative distances. Returns ------- d : `~astropy.units.Quantity` The angular diameter distance between each input redshift pair. Returns scalar if input is scalar, array else-wise. 
""" z1, z2 = aszarr(z1), aszarr(z2) if np.any(z2 < z1): warnings.warn( f"Second redshift(s) z2 ({z2}) is less than first " f"redshift(s) z1 ({z1}).", AstropyUserWarning, ) return self._comoving_transverse_distance_z1z2(z1, z2) / (z2 + 1.0) @vectorize_redshift_method def absorption_distance(self, z, /): """Absorption distance at redshift ``z``. This is used to calculate the number of objects with some cross section of absorption and number density intersecting a sightline per unit redshift path ([1]_, [2]_). Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- d : float or ndarray Absorption distance (dimensionless) at each input redshift. Returns `float` if input scalar, `~numpy.ndarray` otherwise. References ---------- .. [1] Hogg, D. (1999). Distance measures in cosmology, section 11. arXiv e-prints, astro-ph/9905116. .. [2] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B """ return quad(self._abs_distance_integrand_scalar, 0, z)[0] def distmod(self, z): """Distance modulus at redshift ``z``. The distance modulus is defined as the (apparent magnitude - absolute magnitude) for an object at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- distmod : `~astropy.units.Quantity` ['length'] Distance modulus at each input redshift, in magnitudes. See Also -------- z_at_value : Find the redshift corresponding to a distance modulus. """ # Remember that the luminosity distance is in Mpc # Abs is necessary because in certain obscure closed cosmologies # the distance modulus can be negative -- which is okay because # it enters as the square. val = 5.0 * np.log10(abs(self.luminosity_distance(z).value)) + 25.0 return u.Quantity(val, u.mag) def comoving_volume(self, z): r"""Comoving volume in cubic Mpc at redshift ``z``. This is the volume of the universe encompassed by redshifts less than ``z``. For the case of :math:`\Omega_k = 0` it is a sphere of radius `comoving_distance` but it is less intuitive if :math:`\Omega_k` is not. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- V : `~astropy.units.Quantity` Comoving volume in :math:`Mpc^3` at each input redshift. """ Ok0 = self._Ok0 if Ok0 == 0: return 4.0 / 3.0 * pi * self.comoving_distance(z) ** 3 dh = self._hubble_distance.value # .value for speed dm = self.comoving_transverse_distance(z).value term1 = 4.0 * pi * dh**3 / (2.0 * Ok0) * u.Mpc**3 term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2) term3 = sqrt(abs(Ok0)) * dm / dh if Ok0 > 0: return term1 * (term2 - 1.0 / sqrt(abs(Ok0)) * np.arcsinh(term3)) else: return term1 * (term2 - 1.0 / sqrt(abs(Ok0)) * np.arcsin(term3)) def differential_comoving_volume(self, z): """Differential comoving volume at redshift z. Useful for calculating the effective comoving volume. For example, allows for integration over a comoving volume that has a sensitivity function that changes with redshift. The total comoving volume is given by integrating ``differential_comoving_volume`` to redshift ``z`` and multiplying by a solid angle. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- dV : `~astropy.units.Quantity` Differential comoving volume per redshift per steradian at each input redshift. 
""" dm = self.comoving_transverse_distance(z) return self._hubble_distance * (dm**2.0) / (self.efunc(z) << u.steradian) def kpc_comoving_per_arcmin(self, z): """ Separation in transverse comoving kpc corresponding to an arcminute at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- d : `~astropy.units.Quantity` ['length'] The distance in comoving kpc corresponding to an arcmin at each input redshift. """ return self.comoving_transverse_distance(z).to(u.kpc) / _radian_in_arcmin def kpc_proper_per_arcmin(self, z): """ Separation in transverse proper kpc corresponding to an arcminute at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- d : `~astropy.units.Quantity` ['length'] The distance in proper kpc corresponding to an arcmin at each input redshift. """ return self.angular_diameter_distance(z).to(u.kpc) / _radian_in_arcmin def arcsec_per_kpc_comoving(self, z): """ Angular separation in arcsec corresponding to a comoving kpc at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- theta : `~astropy.units.Quantity` ['angle'] The angular separation in arcsec corresponding to a comoving kpc at each input redshift. """ return _radian_in_arcsec / self.comoving_transverse_distance(z).to(u.kpc) def arcsec_per_kpc_proper(self, z): """ Angular separation in arcsec corresponding to a proper kpc at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshift. Returns ------- theta : `~astropy.units.Quantity` ['angle'] The angular separation in arcsec corresponding to a proper kpc at each input redshift. """ return _radian_in_arcsec / self.angular_diameter_distance(z).to(u.kpc) class FlatFLRWMixin(FlatCosmologyMixin): """ Mixin class for flat FLRW cosmologies. Do NOT instantiate directly. Must precede the base class in the multiple-inheritance so that this mixin's ``__init__`` proceeds the base class'. Note that all instances of ``FlatFLRWMixin`` are flat, but not all flat cosmologies are instances of ``FlatFLRWMixin``. As example, ``LambdaCDM`` **may** be flat (for the a specific set of parameter values), but ``FlatLambdaCDM`` **will** be flat. """ Ode0 = FLRW.Ode0.clone(derived=True) # same as FLRW, but now a derived param. def __init_subclass__(cls): super().__init_subclass__() if "Ode0" in cls._init_signature.parameters: raise TypeError( "subclasses of `FlatFLRWMixin` cannot have `Ode0` in `__init__`" ) def __init__(self, *args, **kw): super().__init__(*args, **kw) # guaranteed not to have `Ode0` # Do some twiddling after the fact to get flatness self._Ok0 = 0.0 self._Ode0 = 1.0 - (self._Om0 + self._Ogamma0 + self._Onu0 + self._Ok0) @lazyproperty def nonflat(self: _FlatFLRWMixinT) -> _FLRWT: # Create BoundArgument to handle args versus kwargs. 
# This also handles all errors from mismatched arguments ba = self.__nonflatclass__._init_signature.bind_partial( **self._init_arguments, Ode0=self.Ode0 ) # Make new instance, respecting args vs kwargs inst = self.__nonflatclass__(*ba.args, **ba.kwargs) # Because of machine precision, make sure parameters exactly match for n in inst.__all_parameters__ + ("Ok0",): setattr(inst, "_" + n, getattr(self, n)) return inst def clone( self, *, meta: Mapping | None = None, to_nonflat: bool = None, **kwargs: Any ): """Returns a copy of this object with updated parameters, as specified. This cannot be used to change the type of the cosmology, except for changing to the non-flat version of this cosmology. Parameters ---------- meta : mapping or None (optional, keyword-only) Metadata that will update the current metadata. to_nonflat : bool or None, optional keyword-only Whether to change to the non-flat version of this cosmology. **kwargs Cosmology parameter (and name) modifications. If any parameter is changed and a new name is not given, the name will be set to "[old name] (modified)". Returns ------- newcosmo : `~astropy.cosmology.Cosmology` subclass instance A new instance of this class with updated parameters as specified. If no arguments are given, then a reference to this object is returned instead of copy. Examples -------- To make a copy of the ``Planck13`` cosmology with a different matter density (``Om0``), and a new name: >>> from astropy.cosmology import Planck13 >>> Planck13.clone(name="Modified Planck 2013", Om0=0.35) FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s), Om0=0.35, ... If no name is specified, the new name will note the modification. >>> Planck13.clone(Om0=0.35).name 'Planck13 (modified)' The keyword 'to_nonflat' can be used to clone on the non-flat equivalent cosmology. >>> Planck13.clone(to_nonflat=True) LambdaCDM(name="Planck13", ... >>> Planck13.clone(H0=70, to_nonflat=True) LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ... With 'to_nonflat' `True`, ``Ode0`` can be modified. >>> Planck13.clone(to_nonflat=True, Ode0=1) LambdaCDM(name="Planck13 (modified)", H0=67.77 km / (Mpc s), Om0=0.30712, Ode0=1.0, ... """ return super().clone(meta=meta, to_nonflat=to_nonflat, **kwargs) @property def Otot0(self): """Omega total; the total density/critical density at z=0.""" return 1.0 def Otot(self, z): """The total density parameter at redshift ``z``. Parameters ---------- z : Quantity-like ['redshift'], array-like, or `~numbers.Number` Input redshifts. Returns ------- Otot : ndarray or float Returns float if input scalar. Value of 1. """ return ( 1.0 if isinstance(z, (Number, np.generic)) else np.ones_like(z, subok=False) )
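
# A minimal usage sketch (illustrative, not part of the module above): the distance
# relations documented in the docstrings -- d_L = (1 + z) * d_M, d_A = d_M / (1 + z),
# and distmod = 5 log10(d_L / Mpc) + 25 -- can be checked directly on a concrete
# cosmology.  The parameter values are illustrative only, and scipy is assumed to be
# available for the distance integrals.
import numpy as np
import astropy.units as u
from astropy.cosmology import FlatLambdaCDM

cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
z = 1.0

d_M = cosmo.comoving_transverse_distance(z)  # equals comoving_distance when flat
d_L = cosmo.luminosity_distance(z)
d_A = cosmo.angular_diameter_distance(z)

assert u.allclose(d_L, (1 + z) * d_M)
assert u.allclose(d_A, d_M / (1 + z))
assert np.isclose(cosmo.distmod(z).value, 5 * np.log10(d_L.to_value(u.Mpc)) + 25)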
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB import inspect import random # THIRD PARTY import numpy as np import pytest # LOCAL from astropy.cosmology.core import Cosmology from astropy.cosmology.io.model import _CosmologyModel, from_model, to_model from astropy.cosmology.tests.helper import get_redshift_methods from astropy.modeling.models import Gaussian1D from astropy.utils.compat.optional_deps import HAS_SCIPY from .base import ToFromDirectTestBase, ToFromTestMixinBase ############################################################################### class ToFromModelTestMixin(ToFromTestMixinBase): """Tests for a Cosmology[To/From]Format with ``format="astropy.model"``. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples. """ @pytest.fixture(scope="class") def method_name(self, cosmo): # get methods, ignoring private and dunder methods = get_redshift_methods(cosmo, include_private=False, include_z2=True) # dynamically detect ABC and optional dependencies for n in tuple(methods): params = inspect.signature(getattr(cosmo, n)).parameters.keys() ERROR_SEIVE = (NotImplementedError, ValueError) # # ABC can't introspect for good input if not HAS_SCIPY: ERROR_SEIVE = ERROR_SEIVE + (ModuleNotFoundError,) args = np.arange(len(params)) + 1 try: getattr(cosmo, n)(*args) except ERROR_SEIVE: methods.discard(n) # TODO! pytest doesn't currently allow multiple yields (`cosmo`) so # testing with 1 random method # yield from methods return random.choice(tuple(methods)) if methods else None # =============================================================== def test_fromformat_model_wrong_cls(self, from_format): """Test when Model is not the correct class.""" model = Gaussian1D(amplitude=10, mean=14) with pytest.raises(AttributeError): from_format(model) def test_toformat_model_not_method(self, to_format): """Test when method is not a method.""" with pytest.raises(AttributeError): to_format("astropy.model", method="this is definitely not a method.") def test_toformat_model_not_callable(self, to_format): """Test when method is actually an attribute.""" with pytest.raises(ValueError): to_format("astropy.model", method="name") def test_toformat_model(self, cosmo, to_format, method_name): """Test cosmology -> astropy.model.""" if method_name is None: # no test if no method return model = to_format("astropy.model", method=method_name) assert isinstance(model, _CosmologyModel) # Parameters expect = tuple(n for n in cosmo.__parameters__ if getattr(cosmo, n) is not None) assert model.param_names == expect # scalar result args = np.arange(model.n_inputs) + 1 got = model.evaluate(*args) expected = getattr(cosmo, method_name)(*args) assert np.all(got == expected) got = model(*args) expected = getattr(cosmo, method_name)(*args) np.testing.assert_allclose(got, expected) # vector result if "scalar" not in method_name: args = (np.ones((model.n_inputs, 3)).T + np.arange(model.n_inputs)).T got = model.evaluate(*args) expected = getattr(cosmo, method_name)(*args) assert np.all(got == expected) got = model(*args) expected = getattr(cosmo, method_name)(*args) np.testing.assert_allclose(got, expected) def test_tofromformat_model_instance( self, cosmo_cls, cosmo, method_name, to_format, from_format ): 
"""Test cosmology -> astropy.model -> cosmology.""" if method_name is None: # no test if no method return # ------------ # To Model # this also serves as a test of all added methods / attributes # in _CosmologyModel. model = to_format("astropy.model", method=method_name) assert isinstance(model, _CosmologyModel) assert model.cosmology_class is cosmo_cls assert model.cosmology == cosmo assert model.method_name == method_name # ------------ # From Model # it won't error if everything matches up got = from_format(model, format="astropy.model") assert got == cosmo assert set(cosmo.meta.keys()).issubset(got.meta.keys()) # Note: model adds parameter attributes to the metadata # also it auto-identifies 'format' got = from_format(model) assert got == cosmo assert set(cosmo.meta.keys()).issubset(got.meta.keys()) def test_fromformat_model_subclass_partial_info(self): """ Test writing from an instance and reading from that class. This works with missing information. """ pass # there's no partial information with a Model @pytest.mark.parametrize("format", [True, False, None, "astropy.model"]) def test_is_equivalent_to_model(self, cosmo, method_name, to_format, format): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. This test checks that Cosmology equivalency can be extended to any Python object that can be converted to a Cosmology -- in this case a model. """ if method_name is None: # no test if no method return obj = to_format("astropy.model", method=method_name) assert not isinstance(obj, Cosmology) is_equiv = cosmo.is_equivalent(obj, format=format) assert is_equiv is (format is not False) class TestToFromModel(ToFromDirectTestBase, ToFromModelTestMixin): """Directly test ``to/from_model``.""" def setup_class(self): self.functions = {"to": to_model, "from": from_model}
# Licensed under a 3-clause BSD style license - see LICENSE.rst # STDLIB import inspect from collections import OrderedDict # THIRD PARTY import numpy as np import pytest # LOCAL from astropy.cosmology import Cosmology from astropy.cosmology.io.mapping import from_mapping, to_mapping from .base import ToFromDirectTestBase, ToFromTestMixinBase ############################################################################### class ToFromMappingTestMixin(ToFromTestMixinBase): """Tests for a Cosmology[To/From]Format with ``format="mapping"``. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmology`` for an example. """ def test_to_mapping_default(self, cosmo, to_format): """Test default usage of Cosmology -> mapping.""" m = to_format("mapping") keys = tuple(m.keys()) assert isinstance(m, dict) # Check equality of all expected items assert keys[0] == "cosmology" assert m.pop("cosmology") is cosmo.__class__ assert keys[1] == "name" assert m.pop("name") == cosmo.name for i, k in enumerate(cosmo.__parameters__, start=2): assert keys[i] == k assert np.array_equal(m.pop(k), getattr(cosmo, k)) assert keys[-1] == "meta" assert m.pop("meta") == cosmo.meta # No unexpected items assert not m def test_to_mapping_wrong_cls(self, to_format): """Test incorrect argument ``cls`` in ``to_mapping()``.""" with pytest.raises(TypeError, match="'cls' must be"): to_format("mapping", cls=list) @pytest.mark.parametrize("map_cls", [dict, OrderedDict]) def test_to_mapping_cls(self, to_format, map_cls): """Test argument ``cls`` in ``to_mapping()``.""" m = to_format("mapping", cls=map_cls) assert isinstance(m, map_cls) # test type def test_to_mapping_cosmology_as_str(self, cosmo_cls, to_format): """Test argument ``cosmology_as_str`` in ``to_mapping()``.""" default = to_format("mapping") # Cosmology is the class m = to_format("mapping", cosmology_as_str=False) assert inspect.isclass(m["cosmology"]) assert cosmo_cls is m["cosmology"] assert m == default # False is the default option # Cosmology is a string m = to_format("mapping", cosmology_as_str=True) assert isinstance(m["cosmology"], str) assert m["cosmology"] == cosmo_cls.__qualname__ # Correct class assert tuple(m.keys())[0] == "cosmology" # Stayed at same index def test_tofrom_mapping_cosmology_as_str(self, cosmo, to_format, from_format): """Test roundtrip with ``cosmology_as_str=True``. The test for the default option (`False`) is in ``test_tofrom_mapping_instance``. """ m = to_format("mapping", cosmology_as_str=True) got = from_format(m, format="mapping") assert got == cosmo assert got.meta == cosmo.meta def test_to_mapping_move_from_meta(self, to_format): """Test argument ``move_from_meta`` in ``to_mapping()``.""" default = to_format("mapping") # Metadata is 'separate' from main mapping m = to_format("mapping", move_from_meta=False) assert "meta" in m.keys() assert not any([k in m for k in m["meta"]]) # Not added to main assert m == default # False is the default option # Metadata is mixed into main mapping. 
m = to_format("mapping", move_from_meta=True) assert "meta" not in m.keys() assert all([k in m for k in default["meta"]]) # All added to main # The parameters take precedence over the metadata assert all([np.array_equal(v, m[k]) for k, v in default.items() if k != "meta"]) def test_tofrom_mapping_move_tofrom_meta(self, cosmo, to_format, from_format): """Test roundtrip of ``move_from/to_meta`` in ``to/from_mapping()``.""" # Metadata is mixed into main mapping. m = to_format("mapping", move_from_meta=True) # (Just adding something to ensure there's 'metadata') m["mismatching"] = "will error" # (Tests are different if the last argument is a **kwarg) if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4: got = from_format(m, format="mapping") assert got.name == cosmo.name assert "mismatching" not in got.meta return # don't continue testing # Reading with mismatching parameters errors... with pytest.raises(TypeError, match="there are unused parameters"): from_format(m, format="mapping") # unless mismatched are moved to meta. got = from_format(m, format="mapping", move_to_meta=True) assert got == cosmo # (Doesn't check metadata) assert got.meta["mismatching"] == "will error" # ----------------------------------------------------- def test_from_not_mapping(self, cosmo, from_format): """Test incorrect map type in ``from_mapping()``.""" with pytest.raises((TypeError, ValueError)): from_format("NOT A MAP", format="mapping") def test_from_mapping_default(self, cosmo, to_format, from_format): """Test (cosmology -> Mapping) -> cosmology.""" m = to_format("mapping") # Read from exactly as given. got = from_format(m, format="mapping") assert got == cosmo assert got.meta == cosmo.meta # Reading auto-identifies 'format' got = from_format(m) assert got == cosmo assert got.meta == cosmo.meta def test_fromformat_subclass_partial_info_mapping(self, cosmo): """ Test writing from an instance and reading from that class. This works with missing information. """ m = cosmo.to_format("mapping") # partial information m.pop("cosmology", None) m.pop("Tcmb0", None) # read with the same class that wrote fills in the missing info with # the default value got = cosmo.__class__.from_format(m, format="mapping") got2 = Cosmology.from_format(m, format="mapping", cosmology=cosmo.__class__) got3 = Cosmology.from_format( m, format="mapping", cosmology=cosmo.__class__.__qualname__ ) assert (got == got2) and (got2 == got3) # internal consistency # not equal, because Tcmb0 is changed, which also changes m_nu assert got != cosmo assert got.Tcmb0 == cosmo.__class__._init_signature.parameters["Tcmb0"].default assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo # but the metadata is the same assert got.meta == cosmo.meta @pytest.mark.parametrize("format", [True, False, None, "mapping"]) def test_is_equivalent_to_mapping(self, cosmo, to_format, format): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. This test checks that Cosmology equivalency can be extended to any Python object that can be converted to a Cosmology -- in this case a mapping. 
""" obj = to_format("mapping") assert not isinstance(obj, Cosmology) is_equiv = cosmo.is_equivalent(obj, format=format) assert is_equiv is (format is not False) class TestToFromMapping(ToFromDirectTestBase, ToFromMappingTestMixin): """Directly test ``to/from_mapping``.""" def setup_class(self): self.functions = {"to": to_mapping, "from": from_mapping} @pytest.mark.skip("N/A") def test_fromformat_subclass_partial_info_mapping(self): """This test does not apply to the direct functions."""
# Licensed under a 3-clause BSD style license - see LICENSE.rst # THIRD PARTY import pytest # LOCAL from astropy.cosmology import Cosmology from astropy.cosmology.core import _COSMOLOGY_CLASSES from astropy.cosmology.io.table import from_table, to_table from astropy.table import QTable, Table, vstack from .base import ToFromDirectTestBase, ToFromTestMixinBase ############################################################################### class ToFromTableTestMixin(ToFromTestMixinBase): """ Tests for a Cosmology[To/From]Format with ``format="astropy.table"``. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmology`` for an example. """ def test_to_table_bad_index(self, from_format, to_format): """Test if argument ``index`` is incorrect""" tbl = to_format("astropy.table") # single-row table and has a non-0/None index with pytest.raises(IndexError, match="index 2 out of range"): from_format(tbl, index=2, format="astropy.table") # string index where doesn't match with pytest.raises(KeyError, match="No matches found for key"): from_format(tbl, index="row 0", format="astropy.table") # ----------------------- def test_to_table_failed_cls(self, to_format): """Test failed table type.""" with pytest.raises(TypeError, match="'cls' must be"): to_format("astropy.table", cls=list) @pytest.mark.parametrize("tbl_cls", [QTable, Table]) def test_to_table_cls(self, to_format, tbl_cls): tbl = to_format("astropy.table", cls=tbl_cls) assert isinstance(tbl, tbl_cls) # test type # ----------------------- @pytest.mark.parametrize("in_meta", [True, False]) def test_to_table_in_meta(self, cosmo_cls, to_format, in_meta): """Test where the cosmology class is placed.""" tbl = to_format("astropy.table", cosmology_in_meta=in_meta) # if it's in metadata, it's not a column. And vice versa. if in_meta: assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ assert "cosmology" not in tbl.colnames # not also a column else: assert tbl["cosmology"][0] == cosmo_cls.__qualname__ assert "cosmology" not in tbl.meta # ----------------------- def test_to_table(self, cosmo_cls, cosmo, to_format): """Test cosmology -> astropy.table.""" tbl = to_format("astropy.table") # Test properties of Table. assert isinstance(tbl, QTable) assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ assert tbl["name"] == cosmo.name assert tbl.indices # indexed # Test each Parameter column has expected information. 
for n in cosmo.__parameters__: P = getattr(cosmo_cls, n) # Parameter col = tbl[n] # Column # Compare the two assert col.info.name == P.name assert col.info.description == P.__doc__ assert col.info.meta == (cosmo.meta.get(n) or {}) # ----------------------- def test_from_not_table(self, cosmo, from_format): """Test not passing a Table to the Table parser.""" with pytest.raises((TypeError, ValueError)): from_format("NOT A TABLE", format="astropy.table") def test_tofrom_table_instance(self, cosmo_cls, cosmo, from_format, to_format): """Test cosmology -> astropy.table -> cosmology.""" tbl = to_format("astropy.table") # add information tbl["mismatching"] = "will error" # tests are different if the last argument is a **kwarg if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4: got = from_format(tbl, format="astropy.table") assert got.__class__ is cosmo_cls assert got.name == cosmo.name assert "mismatching" not in got.meta return # don't continue testing # read with mismatching parameters errors with pytest.raises(TypeError, match="there are unused parameters"): from_format(tbl, format="astropy.table") # unless mismatched are moved to meta got = from_format(tbl, format="astropy.table", move_to_meta=True) assert got == cosmo assert got.meta["mismatching"] == "will error" # it won't error if everything matches up tbl.remove_column("mismatching") got = from_format(tbl, format="astropy.table") assert got == cosmo # and it will also work if the cosmology is a class # Note this is not the default output of ``to_format``. tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]] got = from_format(tbl, format="astropy.table") assert got == cosmo # also it auto-identifies 'format' got = from_format(tbl) assert got == cosmo def test_fromformat_table_subclass_partial_info( self, cosmo_cls, cosmo, from_format, to_format ): """ Test writing from an instance and reading from that class. This works with missing information. """ # test to_format tbl = to_format("astropy.table") assert isinstance(tbl, QTable) # partial information tbl.meta.pop("cosmology", None) del tbl["Tcmb0"] # read with the same class that wrote fills in the missing info with # the default value got = cosmo_cls.from_format(tbl, format="astropy.table") got2 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls) got3 = from_format( tbl, format="astropy.table", cosmology=cosmo_cls.__qualname__ ) assert (got == got2) and (got2 == got3) # internal consistency # not equal, because Tcmb0 is changed, which also changes m_nu assert got != cosmo assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo # but the metadata is the same assert got.meta == cosmo.meta @pytest.mark.parametrize("add_index", [True, False]) def test_tofrom_table_mutlirow(self, cosmo_cls, cosmo, from_format, add_index): """Test if table has multiple rows.""" # ------------ # To Table cosmo1 = cosmo.clone(name="row 0") cosmo2 = cosmo.clone(name="row 2") tbl = vstack( [c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)], metadata_conflicts="silent", ) assert isinstance(tbl, QTable) assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ assert tbl[1]["name"] == cosmo.name # whether to add an index. `from_format` can work with or without. 
        if add_index:
            tbl.add_index("name", unique=True)

        # ------------
        # From Table

        # it will error on a multi-row table
        with pytest.raises(ValueError, match="need to select a specific row"):
            from_format(tbl, format="astropy.table")

        # unless the index argument is provided
        got = from_format(tbl, index=1, format="astropy.table")
        assert got == cosmo

        # the index can be a string
        got = from_format(tbl, index=cosmo.name, format="astropy.table")
        assert got == cosmo

        # when there's more than one cosmology found
        tbls = vstack([tbl, tbl], metadata_conflicts="silent")
        with pytest.raises(ValueError, match="more than one"):
            from_format(tbls, index=cosmo.name, format="astropy.table")

    @pytest.mark.parametrize("format", [True, False, None, "astropy.table"])
    def test_is_equivalent_to_table(self, cosmo, to_format, format):
        """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.

        This test checks that Cosmology equivalency can be extended to any
        Python object that can be converted to a Cosmology -- in this case
        a |Table|.
        """
        obj = to_format("astropy.table")
        assert not isinstance(obj, Cosmology)

        is_equiv = cosmo.is_equivalent(obj, format=format)
        assert is_equiv is (format is not False)


class TestToFromTable(ToFromDirectTestBase, ToFromTableTestMixin):
    """Directly test ``to/from_table``."""

    def setup_class(self):
        self.functions = {"to": to_table, "from": from_table}
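
# A hedged sketch of the multi-row handling tested above: several cosmologies can
# be stacked into one QTable and a single row selected on read, here by the "name"
# column.  Planck15/Planck18 are illustrative; both are FlatLambdaCDM instances, so
# their tables share the same columns and can be vstacked.
from astropy.cosmology import Cosmology, Planck15, Planck18
from astropy.table import vstack

tbl = vstack(
    [c.to_format("astropy.table") for c in (Planck15, Planck18)],
    metadata_conflicts="silent",
)

cosmo = Cosmology.from_format(tbl, index="Planck18", format="astropy.table")
assert cosmo == Planck18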
# Licensed under a 3-clause BSD style license - see LICENSE.rst # THIRD PARTY import pytest # LOCAL from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology from astropy.cosmology.io.row import from_row, to_row from astropy.table import Row from .base import ToFromDirectTestBase, ToFromTestMixinBase ############################################################################### class ToFromRowTestMixin(ToFromTestMixinBase): """ Tests for a Cosmology[To/From]Format with ``format="astropy.row"``. This class will not be directly called by :mod:`pytest` since its name does not begin with ``Test``. To activate the contained tests this class must be inherited in a subclass. Subclasses must define a :func:`pytest.fixture` ``cosmo`` that returns/yields an instance of a |Cosmology|. See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples. """ @pytest.mark.parametrize("in_meta", [True, False]) def test_to_row_in_meta(self, cosmo_cls, cosmo, in_meta): """Test where the cosmology class is placed.""" row = cosmo.to_format("astropy.row", cosmology_in_meta=in_meta) # if it's in metadata, it's not a column. And vice versa. if in_meta: assert row.meta["cosmology"] == cosmo_cls.__qualname__ assert "cosmology" not in row.colnames # not also a column else: assert row["cosmology"] == cosmo_cls.__qualname__ assert "cosmology" not in row.meta # ----------------------- def test_from_not_row(self, cosmo, from_format): """Test not passing a Row to the Row parser.""" with pytest.raises(AttributeError): from_format("NOT A ROW", format="astropy.row") def test_tofrom_row_instance(self, cosmo, to_format, from_format): """Test cosmology -> astropy.row -> cosmology.""" # ------------ # To Row row = to_format("astropy.row") assert isinstance(row, Row) assert row["cosmology"] == cosmo.__class__.__qualname__ assert row["name"] == cosmo.name # ------------ # From Row row.table["mismatching"] = "will error" # tests are different if the last argument is a **kwarg if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4: got = from_format(row, format="astropy.row") assert got.__class__ is cosmo.__class__ assert got.name == cosmo.name assert "mismatching" not in got.meta return # don't continue testing # read with mismatching parameters errors with pytest.raises(TypeError, match="there are unused parameters"): from_format(row, format="astropy.row") # unless mismatched are moved to meta got = from_format(row, format="astropy.row", move_to_meta=True) assert got == cosmo assert got.meta["mismatching"] == "will error" # it won't error if everything matches up row.table.remove_column("mismatching") got = from_format(row, format="astropy.row") assert got == cosmo # and it will also work if the cosmology is a class # Note this is not the default output of ``to_format``. cosmology = _COSMOLOGY_CLASSES[row["cosmology"]] row.table.remove_column("cosmology") row.table["cosmology"] = cosmology got = from_format(row, format="astropy.row") assert got == cosmo # also it auto-identifies 'format' got = from_format(row) assert got == cosmo def test_fromformat_row_subclass_partial_info(self, cosmo): """ Test writing from an instance and reading from that class. This works with missing information. """ pass # there are no partial info options @pytest.mark.parametrize("format", [True, False, None, "astropy.row"]) def test_is_equivalent_to_row(self, cosmo, to_format, format): """Test :meth:`astropy.cosmology.Cosmology.is_equivalent`. 
        This test checks that Cosmology equivalency can be extended to any
        Python object that can be converted to a Cosmology -- in this case
        a Row.
        """
        obj = to_format("astropy.row")
        assert not isinstance(obj, Cosmology)

        is_equiv = cosmo.is_equivalent(obj, format=format)
        assert is_equiv is (format is not False)


class TestToFromTable(ToFromDirectTestBase, ToFromRowTestMixin):
    """
    Directly test ``to/from_row``. These are not public API and are
    discouraged from use, in favor of
    ``Cosmology.to/from_format(..., format="astropy.row")``, but should be
    tested regardless b/c 3rd party packages might use these in their
    Cosmology I/O. Also, it's cheap to test.
    """

    def setup_class(self):
        self.functions = {"to": to_row, "from": from_row}
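
# A hedged sketch of the row format tested above: a single table row carrying the
# cosmology, convertible back with ``from_format`` (Planck18 is illustrative).
from astropy.cosmology import Cosmology, Planck18
from astropy.table import Row

row = Planck18.to_format("astropy.row")
assert isinstance(row, Row)
assert row["name"] == "Planck18"

assert Cosmology.from_format(row) == Planck18  # format auto-identified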
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.wpwazpcdm`.""" ############################################################################## # IMPORTS import numpy as np # THIRD PARTY import pytest # LOCAL import astropy.cosmology.units as cu import astropy.units as u from astropy.cosmology import wpwaCDM from astropy.cosmology.parameter import Parameter from astropy.cosmology.tests.test_core import ParameterTestMixin from astropy.utils.compat.optional_deps import HAS_SCIPY from .test_base import FLRWTest from .test_w0wacdm import ParameterwaTestMixin ############################################################################## # TESTS ############################################################################## class ParameterwpTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` wp on a Cosmology. wp is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_wp(self, cosmo_cls, cosmo): """Test Parameter ``wp``.""" # on the class assert isinstance(cosmo_cls.wp, Parameter) assert "at the pivot" in cosmo_cls.wp.__doc__ assert cosmo_cls.wp.unit is None # on the instance assert cosmo.wp is cosmo._wp assert cosmo.wp == self.cls_kwargs["wp"] def test_init_wp(self, cosmo_cls, ba): """Test initialization for values of ``wp``.""" # test that it works with units ba.arguments["wp"] = ba.arguments["wp"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wp == ba.arguments["wp"] # also without units ba.arguments["wp"] = ba.arguments["wp"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wp == ba.arguments["wp"] # must be dimensionless ba.arguments["wp"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class ParameterzpTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` zp on a Cosmology. zp is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_zp(self, cosmo_cls, cosmo): """Test Parameter ``zp``.""" # on the class assert isinstance(cosmo_cls.zp, Parameter) assert "pivot redshift" in cosmo_cls.zp.__doc__ assert cosmo_cls.zp.unit == cu.redshift # on the instance assert cosmo.zp is cosmo._zp assert cosmo.zp == self.cls_kwargs["zp"] << cu.redshift def test_init_zp(self, cosmo_cls, ba): """Test initialization for values of ``zp``.""" # test that it works with units ba.arguments["zp"] = ba.arguments["zp"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.zp == ba.arguments["zp"] # also without units ba.arguments["zp"] = ba.arguments["zp"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.zp.value == ba.arguments["zp"] # must be dimensionless ba.arguments["zp"] = 10 * u.km with pytest.raises(u.UnitConversionError): cosmo_cls(*ba.args, **ba.kwargs) class TestwpwaCDM( FLRWTest, ParameterwpTestMixin, ParameterwaTestMixin, ParameterzpTestMixin ): """Test :class:`astropy.cosmology.wpwaCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = wpwaCDM self.cls_kwargs.update(wp=-0.9, wa=0.2, zp=0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(wp=0.1, wa=0.2, zp=14) assert c.wp == 0.1 assert c.wa == 0.2 assert c.zp == 14 for n in set(cosmo.__parameters__) - {"wp", "wa", "zp"}: v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose( v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1) ) # @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below def test_w(self, cosmo): """Test :meth:`astropy.cosmology.wpwaCDM.w`.""" # super().test_w(cosmo, z) assert u.allclose(cosmo.w(0.5), -0.9) assert u.allclose( cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]), [-0.94848485, -0.93333333, -0.9, -0.84666667, -0.82380952, -0.78266667], ) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ( 'wpwaCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,' " Ode0=0.73, wp=-0.9, wa=0.2, zp=0.5 redshift, Tcmb0=3.0 K," " Neff=3.04, m_nu=[0. 0. 0.] eV, Ob0=0.03)" ) assert repr(cosmo) == expected # =============================================================== # Usage Tests @pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [ ( # no relativistic species (75.0, 0.3, 0.6), {"wp": -0.9, "zp": 0.5, "wa": 0.1, "Tcmb0": 0.0}, [2954.68975298, 4599.83254834, 5643.04013201, 6373.36147627] * u.Mpc, ), ( # massless neutrinos (75.0, 0.25, 0.5), { "wp": -0.9, "zp": 0.4, "wa": 0.1, "Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV), }, [2919.00656215, 4558.0218123, 5615.73412391, 6366.10224229] * u.Mpc, ), ( # massive neutrinos (75.0, 0.25, 0.5), { "wp": -0.9, "zp": 1.0, "wa": 0.1, "Tcmb0": 3.0, "Neff": 4, "m_nu": u.Quantity(5.0, u.eV), }, [2629.48489827, 3874.13392319, 4614.31562397, 5116.51184842] * u.Mpc, ), ], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. 
""" super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected) ############################################################################### # Comparison to Other Codes @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.") def test_varyde_lumdist_mathematica(): """Tests a few varying dark energy EOS models against a Mathematica computation.""" z = np.array([0.2, 0.4, 0.9, 1.2]) # wpwa models cosmo = wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5, Tcmb0=0.0) assert u.allclose( cosmo.luminosity_distance(z), [1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4, ) cosmo = wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9, Tcmb0=0.0) assert u.allclose( cosmo.luminosity_distance(z), [1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4, ) ############################################################################## # Miscellaneous # TODO: these should be better integrated into the new test framework @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_de_densityscale(): cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9, wa=0.2, zp=0.5) z = np.array([0.1, 0.2, 0.5, 1.5, 2.5]) assert u.allclose( cosmo.de_density_scale(z), [1.012246048, 1.0280102, 1.087439, 1.324988, 1.565746], rtol=1e-4, ) assert u.allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7) assert u.allclose( cosmo.de_density_scale([1, 2, 3]), cosmo.de_density_scale([1.0, 2.0, 3.0]), rtol=1e-7, )
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.lambdacdm`.""" ############################################################################## # IMPORTS # STDLIB # THIRD PARTY import pathlib import numpy as np import pytest # LOCAL import astropy.constants as const import astropy.cosmology.units as cu import astropy.units as u from astropy.cosmology import FlatLambdaCDM, LambdaCDM from astropy.cosmology.flrw.lambdacdm import ellipkinc, hyp2f1 from astropy.cosmology.tests.helper import get_redshift_methods from astropy.cosmology.tests.test_core import invalid_zs, valid_zs from astropy.table import QTable from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.exceptions import AstropyUserWarning from .test_base import FlatFLRWMixinTest, FLRWTest ############################################################################## # TESTS ############################################################################## @pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed") def test_optional_deps_functions(): """Test stand-in functions when optional dependencies not installed.""" with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"): ellipkinc() with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.special'"): hyp2f1() ############################################################################## class TestLambdaCDM(FLRWTest): """Test :class:`astropy.cosmology.LambdaCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = LambdaCDM # =============================================================== # Method & Attribute Tests _FLRW_redshift_methods = get_redshift_methods( LambdaCDM, include_private=True, include_z2=False ) - {"_dS_age"} # `_dS_age` is removed because it doesn't strictly rely on the value of `z`, # so any input that doesn't trip up ``np.shape`` is "valid" @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize("method", _FLRW_redshift_methods) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" super().test_redshift_method_bad_input(cosmo, method, z, exc) @pytest.mark.parametrize("z", valid_zs) def test_w(self, cosmo, z): """Test :meth:`astropy.cosmology.LambdaCDM.w`.""" super().test_w(cosmo, z) w = cosmo.w(z) assert u.allclose(w, -1.0) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ( 'LambdaCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,' " Ode0=0.73, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV," " Ob0=0.03)" ) assert repr(cosmo) == expected @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [ ( # no relativistic species (75.0, 0.25, 0.5), {"Tcmb0": 0.0}, [2953.93001902, 4616.7134253, 5685.07765971, 6440.80611897] * u.Mpc, ), ( # massless neutrinos (75.0, 0.25, 0.6), {"Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV)}, [3037.12620424, 4776.86236327, 5889.55164479, 6671.85418235] * u.Mpc, ), ( # massive neutrinos (75.0, 0.3, 0.4), {"Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(10.0, u.eV)}, [2471.80626824, 3567.1902565, 4207.15995626, 4638.20476018] * u.Mpc, ), ], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. 
These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. """ super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected) # ----------------------------------------------------------------------------- class TestFlatLambdaCDM(FlatFLRWMixinTest, TestLambdaCDM): """Test :class:`astropy.cosmology.FlatLambdaCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = FlatLambdaCDM @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize("method", TestLambdaCDM._FLRW_redshift_methods - {"Otot"}) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" super().test_redshift_method_bad_input(cosmo, method, z, exc) # =============================================================== # Method & Attribute Tests def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ( 'FlatLambdaCDM(name="ABCMeta", H0=70.0 km / (Mpc s),' " Om0=0.27, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV," " Ob0=0.03)" ) assert repr(cosmo) == expected # =============================================================== # Usage Tests @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [ ( # no relativistic species (75.0, 0.25), {"Tcmb0": 0.0}, [3180.83488552, 5060.82054204, 6253.6721173, 7083.5374303] * u.Mpc, ), ( # massless neutrinos (75.0, 0.25), {"Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV)}, [3180.42662867, 5059.60529655, 6251.62766102, 7080.71698117] * u.Mpc, ), ( # massive neutrinos (75.0, 0.25), {"Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(10.0, u.eV)}, [2337.54183142, 3371.91131264, 3988.40711188, 4409.09346922] * u.Mpc, ), ( # work the scalar nu density functions (75.0, 0.25), {"Tcmb0": 3.0, "m_nu": u.Quantity([10.0, 0, 0], u.eV)}, [2777.71589173, 4186.91111666, 5046.0300719, 5636.10397302] * u.Mpc, ), ( # work the scalar nu density functions (75.0, 0.25), {"Tcmb0": 3.0, "m_nu": u.Quantity([10.0, 5, 0], u.eV)}, [2636.48149391, 3913.14102091, 4684.59108974, 5213.07557084] * u.Mpc, ), ( # work the scalar nu density functions (75.0, 0.25), {"Tcmb0": 3.0, "m_nu": u.Quantity([4.0, 5, 9], u.eV)}, [2563.5093049, 3776.63362071, 4506.83448243, 5006.50158829] * u.Mpc, ), ( # work the scalar nu density functions (75.0, 0.25), {"Tcmb0": 3.0, "Neff": 4.2, "m_nu": u.Quantity([1.0, 4.0, 5, 9], u.eV)}, [2525.58017482, 3706.87633298, 4416.58398847, 4901.96669755] * u.Mpc, ), ], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. """ super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected) ############################################################################## # Comparison to Other Codes @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.") def test_flat_z1(): """Test a flat cosmology at z=1 against several other on-line calculators. 
Test values were taken from the following web cosmology calculators on 2012-02-11: Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html (https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W) Kempner: http://www.kempner.net/cosmic.php iCosmos: http://www.icosmos.co.uk/index.html """ cosmo = FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0) # The order of values below is Wright, Kempner, iCosmos' assert u.allclose( cosmo.comoving_distance(1), [3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4 ) assert u.allclose( cosmo.angular_diameter_distance(1), [1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4, ) assert u.allclose( cosmo.luminosity_distance(1), [6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4 ) assert u.allclose( cosmo.lookback_time(1), [7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3 ) assert u.allclose( cosmo.lookback_distance(1), [2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3 ) ############################################################################## # Regression Tests SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES = [ FlatLambdaCDM(H0=70, Om0=0.0, Tcmb0=0.0), # de Sitter FlatLambdaCDM(H0=70, Om0=1.0, Tcmb0=0.0), # Einstein - de Sitter FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0.0), # Hypergeometric LambdaCDM(H0=70, Om0=0.3, Ode0=0.6, Tcmb0=0.0), # Elliptic ] ITERABLE_REDSHIFTS = [ (0, 1, 2, 3, 4), # tuple [0, 1, 2, 3, 4], # list np.array([0, 1, 2, 3, 4]), # array ] @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") @pytest.mark.parametrize("cosmo", SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES) @pytest.mark.parametrize("z", ITERABLE_REDSHIFTS) def test_comoving_distance_iterable_argument(cosmo, z): """ Regression test for #10980 Test that specialized comoving distance methods handle iterable arguments. """ assert u.allclose( cosmo.comoving_distance(z), cosmo._integral_comoving_distance_z1z2(0.0, z) ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") @pytest.mark.parametrize("cosmo", SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES) def test_comoving_distance_broadcast(cosmo): """ Regression test for #10980 Test that specialized comoving distance methods broadcast array arguments. """ z1 = np.zeros((2, 5)) z2 = np.ones((3, 1, 5)) z3 = np.ones((7, 5)) output_shape = np.broadcast(z1, z2).shape # Check compatible array arguments return an array with the correct shape assert cosmo._comoving_distance_z1z2(z1, z2).shape == output_shape # Check incompatible array arguments raise an error with pytest.raises(ValueError, match="z1 and z2 have different shapes"): cosmo._comoving_distance_z1z2(z1, z3) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_elliptic_comoving_distance_z1z2(): """Regression test for #8388.""" cosmo = LambdaCDM(70.0, 2.3, 0.05, Tcmb0=0) z = 0.2 assert u.allclose( cosmo.comoving_distance(z), cosmo._integral_comoving_distance_z1z2(0.0, z) ) assert u.allclose( cosmo._elliptic_comoving_distance_z1z2(0.0, z), cosmo._integral_comoving_distance_z1z2(0.0, z), ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_ogamma(): """Tests the effects of changing the temperature of the CMB""" # Tested against Ned Wright's advanced cosmology calculator, # Sep 7 2012. The accuracy of our comparison is limited by # how many digits it outputs, which limits our test to about # 0.2% accuracy. The NWACC does not allow one # to change the number of nuetrino species, fixing that at 3. # Also, inspection of the NWACC code shows it uses inaccurate # constants at the 0.2% level (specifically, a_B), # so we shouldn't expect to match it that well. 
The integral is # also done rather crudely. Therefore, we should not expect # the NWACC to be accurate to better than about 0.5%, which is # unfortunate, but reflects a problem with it rather than this code. # More accurate tests below using Mathematica z = np.array([1.0, 10.0, 500.0, 1000.0]) cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3) assert u.allclose( cosmo.angular_diameter_distance(z), [1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4, ) cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3) assert u.allclose( cosmo.angular_diameter_distance(z), [1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4, ) cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3) assert u.allclose( cosmo.angular_diameter_distance(z), [1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4, ) # Next compare with doing the integral numerically in Mathematica, # which allows more precision in the test. It is at least as # good as 0.01%, possibly better cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04) assert u.allclose( cosmo.angular_diameter_distance(z), [1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5, ) cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04) assert u.allclose( cosmo.angular_diameter_distance(z), [1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5, ) cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04) assert u.allclose( cosmo.angular_diameter_distance(z), [1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5, ) # Just to be really sure, we also do a version where the integral # is analytic, which is a Ode = 0 flat universe. In this case # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1) # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance. Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0**3 * 2.725**4 / 1.87837e-26 Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04 Or0 = (Ogamma0h2 + Onu0h2) / 0.7**2 Om0 = 1.0 - Or0 hubdis = (299792.458 / 70.0) * u.Mpc cosmo = FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04) targvals = 2.0 * hubdis * (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0) assert u.allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5) # And integers for z assert u.allclose(cosmo.comoving_distance(z.astype(int)), targvals, rtol=1e-5) # Try Tcmb0 = 4 Or0 *= (4.0 / 2.725) ** 4 Om0 = 1.0 - Or0 cosmo = FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04) targvals = 2.0 * hubdis * (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0) assert u.allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") @pytest.mark.parametrize( "file_name", ["cosmo_flat.ecsv", "cosmo_open.ecsv", "cosmo_closed.ecsv"] ) def test_flat_open_closed_icosmo(file_name): """Test against the tabulated values generated from icosmo.org with three example cosmologies (flat, open and closed). 
""" with u.add_enabled_units(cu): tbl = QTable.read(pathlib.Path(__file__).parent / "data" / file_name) cosmo = LambdaCDM( H0=100 * tbl.meta["h"], Om0=tbl.meta["Om"], Ode0=tbl.meta["Ol"], Tcmb0=0.0 ) assert u.allclose(cosmo.comoving_transverse_distance(tbl["redshift"]), tbl["dm"]) assert u.allclose(cosmo.angular_diameter_distance(tbl["redshift"]), tbl["da"]) assert u.allclose(cosmo.luminosity_distance(tbl["redshift"]), tbl["dl"]) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_comoving_transverse_distance_z1z2(): tcos = FlatLambdaCDM(100, 0.3, Tcmb0=0.0) with pytest.raises(ValueError): # test diff size z1, z2 fail tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5)) # Tests that should actually work, target values computed with # http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML # Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686) assert u.allclose( tcos._comoving_transverse_distance_z1z2(1, 2), 1313.2232194828466 * u.Mpc ) # In a flat universe comoving distance and comoving transverse # distance are identical z1 = 0, 0, 2, 0.5, 1 z2 = 2, 1, 1, 2.5, 1.1 assert u.allclose( tcos._comoving_distance_z1z2(z1, z2), tcos._comoving_transverse_distance_z1z2(z1, z2), ) # Test Flat Universe with Omega_M > 1. Rarely used, but perfectly valid. tcos = FlatLambdaCDM(100, 1.5, Tcmb0=0.0) results = ( 2202.72682564, 1559.51679971, -643.21002593, 1408.36365679, 85.09286258, ) * u.Mpc assert u.allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results) # In a flat universe comoving distance and comoving transverse # distance are identical z1 = 0, 0, 2, 0.5, 1 z2 = 2, 1, 1, 2.5, 1.1 assert u.allclose( tcos._comoving_distance_z1z2(z1, z2), tcos._comoving_transverse_distance_z1z2(z1, z2), ) # Test non-flat cases to avoid simply testing # comoving_distance_z1z2. Test array, array case. tcos = LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0) results = ( 3535.931375645655, 2226.430046551708, -1208.6817970036532, 2595.567367601969, 151.36592003406884, ) * u.Mpc assert u.allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results) # Test positive curvature with scalar, array combination. tcos = LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0) z1 = 0.1 z2 = 0, 0.1, 0.2, 0.5, 1.1, 2 results = ( -281.31602666724865, 0.0, 248.58093707820436, 843.9331377460543, 1618.6104987686672, 2287.5626543279927, ) * u.Mpc assert u.allclose(tcos._comoving_transverse_distance_z1z2(z1, z2), results) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_angular_diameter_distance_z1z2(): tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) with pytest.raises(ValueError): # test diff size z1, z2 fail tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5]) # Tests that should actually work, target values computed with # http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML # Kayser, Helbig, and Schramm (Astron.Astrophys. 
318 (1997) 680-686) assert u.allclose( tcos.angular_diameter_distance_z1z2(1, 2), 646.22968662822018 * u.Mpc ) z1 = 2 # Separate test for z2<z1, returns negative value with warning z2 = 1 results = -969.34452994 * u.Mpc with pytest.warns(AstropyUserWarning, match="less than first redshift"): assert u.allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results) z1 = 0, 0, 0.5, 1 z2 = 2, 1, 2.5, 1.1 results = ( 1760.0628637762106, 1670.7497657219858, 1159.0970895962193, 115.72768186186921, ) * u.Mpc assert u.allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results) z1 = 0.1 z2 = 0.1, 0.2, 0.5, 1.1, 2 results = (0.0, 332.09893173, 986.35635069, 1508.37010062, 1621.07937976) * u.Mpc assert u.allclose(tcos.angular_diameter_distance_z1z2(0.1, z2), results) # Non-flat (positive Ok0) test tcos = LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0) assert u.allclose( tcos.angular_diameter_distance_z1z2(1, 2), 620.1175337852428 * u.Mpc ) # Non-flat (negative Ok0) test tcos = LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0) assert u.allclose( tcos.angular_diameter_distance_z1z2(1, 2), 228.42914659246014 * u.Mpc ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_massivenu_density(): # Testing neutrino density calculation # Simple test cosmology, where we compare rho_nu and rho_gamma # against the exact formula (eq 24/25 of Komatsu et al. 2011) # computed using Mathematica. The approximation we use for f(y) # is only good to ~ 0.5% (with some redshift dependence), so that's # what we test to. ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0]) nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) # First try 3 massive neutrinos, all 100 eV -- note this is a universe # seriously dominated by neutrinos! tcos = FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(100.0, u.eV)) assert tcos.has_massive_nu assert tcos.Neff == 3 nurel_exp = ( nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323, 15633.5, 171.801]) ) assert u.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) assert u.allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3) # Next, slightly less massive tcos = FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.25, u.eV)) nurel_exp = ( nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312, 39.1005, 1.11086]) ) assert u.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) # For this one also test Onu directly onu_exp = np.array([0.01890217, 0.05244681, 0.0638236, 0.06999286, 0.1344951]) assert u.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) # And fairly light tcos = FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.01, u.eV)) nurel_exp = ( nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348, 1.90671, 1.00021]) ) assert u.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) onu_exp = np.array([0.00066599, 0.00172677, 0.0020732, 0.00268404, 0.0978313]) assert u.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) assert u.allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048], rtol=1e-4) assert u.allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534], rtol=1e-4) # Now a mixture of neutrino masses, with non-integer Neff tcos = FlatLambdaCDM( 80.0, 0.30, Tcmb0=3.0, Neff=3.04, m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV) ) nurel_exp = ( nuprefac * tcos.Neff * np.array([149.386233, 74.87915, 50.0518, 14.002403, 1.03702333]) ) assert u.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) onu_exp = np.array([0.00584959, 0.01493142, 0.01772291, 0.01963451, 0.10227728]) assert 
u.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) # Integer redshifts ztest = ztest.astype(int) assert u.allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3) assert u.allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3) ############################################################################## # Miscellaneous # TODO: these should be better integrated into the new test framework @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_units(): """Test if the right units are being returned""" cosmo = FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0) assert cosmo.comoving_distance(1.0).unit == u.Mpc assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc assert cosmo.luminosity_distance(1.0).unit == u.Mpc assert cosmo.lookback_time(1.0).unit == u.Gyr assert cosmo.lookback_distance(1.0).unit == u.Mpc assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s assert cosmo.Tcmb(1.0).unit == u.K assert cosmo.Tcmb([0.0, 1.0]).unit == u.K assert cosmo.Tnu(1.0).unit == u.K assert cosmo.Tnu([0.0, 1.0]).unit == u.K assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin assert cosmo.critical_density(1.0).unit == u.g / u.cm**3 assert cosmo.comoving_volume(1.0).unit == u.Mpc**3 assert cosmo.age(1.0).unit == u.Gyr assert cosmo.distmod(1.0).unit == u.mag def test_xtfuncs(): """Test of absorption and lookback integrand""" cosmo = LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725) z = np.array([2.0, 3.2]) assert u.allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378, rtol=1e-4) assert u.allclose( cosmo.lookback_time_integrand(z), [0.10333179, 0.04644541], rtol=1e-4 ) assert u.allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402, rtol=1e-4) assert u.allclose( cosmo.abs_distance_integrand(z), [2.7899584, 3.44104758], rtol=1e-4 ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_matter(): # Test non-relativistic matter evolution tcos = FlatLambdaCDM(70.0, 0.3, Ob0=0.045) assert u.allclose(tcos.Om(0), 0.3) assert u.allclose(tcos.Ob(0), 0.045) z = np.array([0.0, 0.5, 1.0, 2.0]) assert u.allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455], rtol=1e-4) assert u.allclose( tcos.Ob(z), [0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4 ) assert u.allclose( tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636], rtol=1e-4 ) # Consistency of dark and baryonic matter evolution with all # non-relativistic matter assert u.allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z)) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_ocurv(): # Test Ok evolution # Flat, boring case tcos = FlatLambdaCDM(70.0, 0.3) assert u.allclose(tcos.Ok0, 0.0) assert u.allclose(tcos.Ok(0), 0.0) z = np.array([0.0, 0.5, 1.0, 2.0]) assert u.allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0], rtol=1e-6) # Not flat tcos = LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K)) assert u.allclose(tcos.Ok0, 0.2) assert u.allclose(tcos.Ok(0), 0.2) assert u.allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692], rtol=1e-4) # Test the sum; note that Ogamma/Onu are 0 assert u.allclose( tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z), [1.0, 1.0, 
1.0, 1.0], rtol=1e-5 ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_ode(): # Test Ode evolution, turn off neutrinos, cmb tcos = FlatLambdaCDM(70.0, 0.3, Tcmb0=0) assert u.allclose(tcos.Ode0, 0.7) assert u.allclose(tcos.Ode(0), 0.7) z = np.array([0.0, 0.5, 1.0, 2.0]) assert u.allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545], rtol=1e-5) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_tcmb(): cosmo = FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5) assert u.allclose(cosmo.Tcmb0, 2.5 * u.K) assert u.allclose(cosmo.Tcmb(2), 7.5 * u.K) z = [0.0, 1.0, 2.0, 3.0, 9.0] assert u.allclose(cosmo.Tcmb(z), [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6) # Make sure it's the same for integers z = [0, 1, 2, 3, 9] assert u.allclose(cosmo.Tcmb(z), [2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_tnu(): cosmo = FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0) assert u.allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6) assert u.allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6) z = [0.0, 1.0, 2.0, 3.0] expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K assert u.allclose(cosmo.Tnu(z), expected, rtol=1e-6) # Test for integers z = [0, 1, 2, 3] assert u.allclose(cosmo.Tnu(z), expected, rtol=1e-6) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_kpc_methods(): cosmo = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) assert u.allclose(cosmo.arcsec_per_kpc_comoving(3), 0.0317179167 * u.arcsec / u.kpc) assert u.allclose(cosmo.arcsec_per_kpc_proper(3), 0.1268716668 * u.arcsec / u.kpc) assert u.allclose(cosmo.kpc_comoving_per_arcmin(3), 1891.6753126 * u.kpc / u.arcmin) assert u.allclose(cosmo.kpc_proper_per_arcmin(3), 472.918828 * u.kpc / u.arcmin) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_comoving_volume(): c_flat = LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0) c_open = LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0) c_closed = LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0) # test against ned wright's calculator (cubic Gpc) redshifts = np.array([0.5, 1, 2, 3, 5, 9]) wright_flat = ( np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802]) * u.Gpc**3 ) wright_open = ( np.array([20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814]) * u.Gpc**3 ) wright_closed = ( np.array([12.619, 44.708, 114.904, 173.709, 258.82, 358.992]) * u.Gpc**3 ) # The wright calculator isn't very accurate, so we use a rather # modest precision assert u.allclose(c_flat.comoving_volume(redshifts), wright_flat, rtol=1e-2) assert u.allclose(c_open.comoving_volume(redshifts), wright_open, rtol=1e-2) assert u.allclose(c_closed.comoving_volume(redshifts), wright_closed, rtol=1e-2) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_differential_comoving_volume(): from scipy.integrate import quad c_flat = LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0) c_open = LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0) c_closed = LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0) # test that integration of differential_comoving_volume() # yields same as comoving_volume() redshifts = np.array([0.5, 1, 2, 3, 5, 9]) wright_flat = ( np.array([29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802]) * u.Gpc**3 ) wright_open = ( np.array([20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814]) * u.Gpc**3 ) wright_closed = ( np.array([12.619, 44.708, 114.904, 173.709, 258.82, 358.992]) * u.Gpc**3 ) # The wright calculator isn't very accurate, so we use a rather 
# modest precision. def ftemp(x): return c_flat.differential_comoving_volume(x).value def otemp(x): return c_open.differential_comoving_volume(x).value def ctemp(x): return c_closed.differential_comoving_volume(x).value # Multiply by solid_angle (4 * pi) assert u.allclose( np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0] for redshift in redshifts]) * u.Mpc**3, wright_flat, rtol=1e-2, ) assert u.allclose( np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0] for redshift in redshifts]) * u.Mpc**3, wright_open, rtol=1e-2, ) assert u.allclose( np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0] for redshift in redshifts]) * u.Mpc**3, wright_closed, rtol=1e-2, ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_age(): # WMAP7 but with Omega_relativisitic = 0 tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) assert u.allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr) assert u.allclose(tcos.age(4), 1.5823603508870991 * u.Gyr) assert u.allclose(tcos.age([1.0, 5.0]), [5.97113193, 1.20553129] * u.Gyr) assert u.allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr) # Add relativistic species tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0) assert u.allclose(tcos.age(4), 1.5773003779230699 * u.Gyr) assert u.allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr) # And massive neutrinos tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0, m_nu=0.1 * u.eV) assert u.allclose(tcos.age(4), 1.5546485439853412 * u.Gyr) assert u.allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_distmod(): # WMAP7 but with Omega_relativisitic = 0 tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) assert u.allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc) assert u.allclose(tcos.distmod([1, 5]), [44.124857, 48.40167258] * u.mag) assert u.allclose(tcos.distmod([1.0, 5.0]), [44.124857, 48.40167258] * u.mag) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_neg_distmod(): # Cosmology with negative luminosity distances (perfectly okay, # if obscure) tcos = LambdaCDM(70, 0.2, 1.3, Tcmb0=0) assert u.allclose( tcos.luminosity_distance([50, 100]), [16612.44047622, -46890.79092244] * u.Mpc ) assert u.allclose(tcos.distmod([50, 100]), [46.102167189, 48.355437790944] * u.mag) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_critical_density(): from astropy.constants import codata2014 # WMAP7 but with Omega_relativistic = 0 # These tests will fail if astropy.const starts returning non-mks # units by default; see the comment at the top of core.py. # critical_density0 is inversely proportional to G. tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) fac = (const.G / codata2014.G).to(u.dimensionless_unscaled).value assert u.allclose( tcos.critical_density0 * fac, 9.309668456020899e-30 * (u.g / u.cm**3) ) assert u.allclose(tcos.critical_density0, tcos.critical_density(0)) assert u.allclose( tcos.critical_density([1, 5]) * fac, [2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3), ) assert u.allclose( tcos.critical_density([1.0, 5.0]) * fac, [2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3), ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_integral(): # Test integer vs. 
floating point inputs cosmo = LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50) assert u.allclose( cosmo.comoving_distance(3), cosmo.comoving_distance(3.0), rtol=1e-7 ) assert u.allclose( cosmo.comoving_distance([1, 2, 3, 5]), cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]), rtol=1e-7, ) assert u.allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7) assert u.allclose(cosmo.efunc([1, 2, 6]), cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7) assert u.allclose( cosmo.inv_efunc([1, 2, 6]), cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7 ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_de_densityscale(): cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.70) z = np.array([0.1, 0.2, 0.5, 1.5, 2.5]) assert u.allclose(cosmo.de_density_scale(z), [1.0, 1.0, 1.0, 1.0, 1.0]) # Integer check assert u.allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7) assert u.allclose( cosmo.de_density_scale([1, 2, 3]), cosmo.de_density_scale([1.0, 2.0, 3.0]), rtol=1e-7, ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_comoving_distance_z1z2(): tcos = LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0) with pytest.raises(ValueError): # test diff size z1, z2 fail tcos._comoving_distance_z1z2((1, 2), (3, 4, 5)) # Comoving distances are invertible assert u.allclose( tcos._comoving_distance_z1z2(1, 2), -tcos._comoving_distance_z1z2(2, 1) ) z1 = 0, 0, 2, 0.5, 1 z2 = 2, 1, 1, 2.5, 1.1 results = ( 3767.90579253, 2386.25591391, -1381.64987862, 2893.11776663, 174.1524683, ) * u.Mpc assert u.allclose(tcos._comoving_distance_z1z2(z1, z2), results) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_age_in_special_cosmologies(): """Check that age in de Sitter and Einstein-de Sitter Universes work. Some analytic solutions fail at these critical points. """ c_dS = FlatLambdaCDM(100, 0, Tcmb0=0) assert u.allclose(c_dS.age(z=0), np.inf * u.Gyr) assert u.allclose(c_dS.age(z=1), np.inf * u.Gyr) assert u.allclose(c_dS.lookback_time(z=0), 0 * u.Gyr) assert u.allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr) c_EdS = FlatLambdaCDM(100, 1, Tcmb0=0) assert u.allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr) assert u.allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr) assert u.allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr) assert u.allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_distance_in_special_cosmologies(): """Check that de Sitter and Einstein-de Sitter Universes both work. Some analytic solutions fail at these critical points. 
""" c_dS = FlatLambdaCDM(100, 0, Tcmb0=0) assert u.allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc) assert u.allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc) c_EdS = FlatLambdaCDM(100, 1, Tcmb0=0) assert u.allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc) assert u.allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc) c_dS = LambdaCDM(100, 0, 1, Tcmb0=0) assert u.allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc) assert u.allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc) c_EdS = LambdaCDM(100, 1, 0, Tcmb0=0) assert u.allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc) assert u.allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_absorption_distance(): tcos = FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0) assert u.allclose(tcos.absorption_distance([1, 3]), [1.72576635, 7.98685853]) assert u.allclose(tcos.absorption_distance([1.0, 3.0]), [1.72576635, 7.98685853]) assert u.allclose(tcos.absorption_distance(3), 7.98685853) assert u.allclose(tcos.absorption_distance(3.0), 7.98685853) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_distance_broadcast(): """Test array shape broadcasting for functions with single redshift inputs""" cosmo = FlatLambdaCDM(H0=70, Om0=0.27, m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV)) z = np.linspace(0.1, 1, 6) z_reshape2d = z.reshape(2, 3) z_reshape3d = z.reshape(3, 2, 1) # Things with units methods = [ "comoving_distance", "luminosity_distance", "comoving_transverse_distance", "angular_diameter_distance", "distmod", "lookback_time", "age", "comoving_volume", "differential_comoving_volume", "kpc_comoving_per_arcmin", ] for method in methods: g = getattr(cosmo, method) value_flat = g(z) assert value_flat.shape == z.shape value_2d = g(z_reshape2d) assert value_2d.shape == z_reshape2d.shape value_3d = g(z_reshape3d) assert value_3d.shape == z_reshape3d.shape assert value_flat.unit == value_2d.unit assert value_flat.unit == value_3d.unit assert u.allclose(value_flat, value_2d.flatten()) assert u.allclose(value_flat, value_3d.flatten()) # Also test unitless ones methods = [ "absorption_distance", "Om", "Ode", "Ok", "H", "w", "de_density_scale", "Onu", "Ogamma", "nu_relative_density", ] for method in methods: g = getattr(cosmo, method) value_flat = g(z) assert value_flat.shape == z.shape value_2d = g(z_reshape2d) assert value_2d.shape == z_reshape2d.shape value_3d = g(z_reshape3d) assert value_3d.shape == z_reshape3d.shape assert u.allclose(value_flat, value_2d.flatten()) assert u.allclose(value_flat, value_3d.flatten())
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.base`.""" ############################################################################## # IMPORTS # STDLIB import abc import copy # THIRD PARTY import numpy as np import pytest import astropy.constants as const # LOCAL import astropy.units as u from astropy.cosmology import FLRW, FlatLambdaCDM, LambdaCDM, Planck18 from astropy.cosmology.core import _COSMOLOGY_CLASSES from astropy.cosmology.flrw.base import _a_B_c2, _critdens_const, _H0units_to_invs, quad from astropy.cosmology.parameter import Parameter from astropy.cosmology.tests.helper import get_redshift_methods from astropy.cosmology.tests.test_core import ( CosmologyTest, FlatCosmologyMixinTest, ParameterTestMixin, invalid_zs, valid_zs, ) from astropy.utils.compat.optional_deps import HAS_SCIPY ############################################################################## # SETUP / TEARDOWN class SubFLRW(FLRW): def w(self, z): return super().w(z) ############################################################################## # TESTS ############################################################################## @pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed") def test_optional_deps_functions(): """Test stand-in functions when optional dependencies not installed.""" with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.integrate'"): quad() ############################################################################## class ParameterH0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` H0 on a Cosmology. H0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_H0(self, cosmo_cls, cosmo): """Test Parameter ``H0``.""" unit = u.Unit("km/(s Mpc)") # on the class assert isinstance(cosmo_cls.H0, Parameter) assert "Hubble constant" in cosmo_cls.H0.__doc__ assert cosmo_cls.H0.unit == unit # validation assert cosmo_cls.H0.validate(cosmo, 1) == 1 * unit assert cosmo_cls.H0.validate(cosmo, 10 * unit) == 10 * unit with pytest.raises(ValueError, match="H0 is a non-scalar quantity"): cosmo_cls.H0.validate(cosmo, [1, 2]) # on the instance assert cosmo.H0 is cosmo._H0 assert cosmo.H0 == self._cls_args["H0"] assert isinstance(cosmo.H0, u.Quantity) and cosmo.H0.unit == unit def test_init_H0(self, cosmo_cls, ba): """Test initialization for values of ``H0``.""" # test that it works with units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.H0 == ba.arguments["H0"] # also without units ba.arguments["H0"] = ba.arguments["H0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.H0.value == ba.arguments["H0"] # fails for non-scalar ba.arguments["H0"] = u.Quantity([70, 100], u.km / u.s / u.Mpc) with pytest.raises(ValueError, match="H0 is a non-scalar quantity"): cosmo_cls(*ba.args, **ba.kwargs) class ParameterOm0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Om0 on a Cosmology. Om0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_Om0(self, cosmo_cls, cosmo): """Test Parameter ``Om0``.""" # on the class assert isinstance(cosmo_cls.Om0, Parameter) assert "Omega matter" in cosmo_cls.Om0.__doc__ # validation assert cosmo_cls.Om0.validate(cosmo, 1) == 1 assert cosmo_cls.Om0.validate(cosmo, 10 * u.one) == 10 with pytest.raises(ValueError, match="Om0 cannot be negative"): cosmo_cls.Om0.validate(cosmo, -1) # on the instance assert cosmo.Om0 is cosmo._Om0 assert cosmo.Om0 == self._cls_args["Om0"] assert isinstance(cosmo.Om0, float) def test_init_Om0(self, cosmo_cls, ba): """Test initialization for values of ``Om0``.""" # test that it works with units ba.arguments["Om0"] = ba.arguments["Om0"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Om0 == ba.arguments["Om0"] # also without units ba.arguments["Om0"] = ba.arguments["Om0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Om0 == ba.arguments["Om0"] # fails for negative numbers ba.arguments["Om0"] = -0.27 with pytest.raises(ValueError, match="Om0 cannot be negative."): cosmo_cls(*ba.args, **ba.kwargs) class ParameterOde0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Ode0 on a Cosmology. Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Parameter_Ode0(self, cosmo_cls): """Test Parameter ``Ode0`` on the class.""" assert isinstance(cosmo_cls.Ode0, Parameter) assert "Omega dark energy" in cosmo_cls.Ode0.__doc__ def test_Parameter_Ode0_validation(self, cosmo_cls, cosmo): """Test Parameter ``Ode0`` validation.""" assert cosmo_cls.Ode0.validate(cosmo, 1.1) == 1.1 assert cosmo_cls.Ode0.validate(cosmo, 10 * u.one) == 10.0 with pytest.raises(TypeError, match="only dimensionless"): cosmo_cls.Ode0.validate(cosmo, 10 * u.km) def test_Ode0(self, cosmo): """Test Parameter ``Ode0`` validation.""" # if Ode0 is a parameter, test its value assert cosmo.Ode0 is cosmo._Ode0 assert cosmo.Ode0 == self._cls_args["Ode0"] assert isinstance(cosmo.Ode0, float) def test_init_Ode0(self, cosmo_cls, ba): """Test initialization for values of ``Ode0``.""" # test that it works with units ba.arguments["Ode0"] = ba.arguments["Ode0"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ode0 == ba.arguments["Ode0"] # also without units ba.arguments["Ode0"] = ba.arguments["Ode0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ode0 == ba.arguments["Ode0"] # Setting param to 0 respects that. Note this test uses ``Ode()``. ba.arguments["Ode0"] = 0.0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert u.allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0]) assert u.allclose(cosmo.Ode(1), 0) # Must be dimensionless or have no units. Errors otherwise. ba.arguments["Ode0"] = 10 * u.km with pytest.raises(TypeError, match="only dimensionless"): cosmo_cls(*ba.args, **ba.kwargs) class ParameterTcmb0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Tcmb0 on a Cosmology. Tcmb0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_Tcmb0(self, cosmo_cls, cosmo): """Test Parameter ``Tcmb0``.""" # on the class assert isinstance(cosmo_cls.Tcmb0, Parameter) assert "Temperature of the CMB" in cosmo_cls.Tcmb0.__doc__ assert cosmo_cls.Tcmb0.unit == u.K # validation assert cosmo_cls.Tcmb0.validate(cosmo, 1) == 1 * u.K assert cosmo_cls.Tcmb0.validate(cosmo, 10 * u.K) == 10 * u.K with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"): cosmo_cls.Tcmb0.validate(cosmo, [1, 2]) # on the instance assert cosmo.Tcmb0 is cosmo._Tcmb0 assert cosmo.Tcmb0 == self.cls_kwargs["Tcmb0"] assert isinstance(cosmo.Tcmb0, u.Quantity) and cosmo.Tcmb0.unit == u.K def test_init_Tcmb0(self, cosmo_cls, ba): """Test initialization for values of ``Tcmb0``.""" # test that it works with units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Tcmb0 == ba.arguments["Tcmb0"] # also without units ba.arguments["Tcmb0"] = ba.arguments["Tcmb0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Tcmb0.value == ba.arguments["Tcmb0"] # must be a scalar ba.arguments["Tcmb0"] = u.Quantity([0.0, 2], u.K) with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"): cosmo_cls(*ba.args, **ba.kwargs) class ParameterNeffTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Neff on a Cosmology. Neff is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Neff(self, cosmo_cls, cosmo): """Test Parameter ``Neff``.""" # on the class assert isinstance(cosmo_cls.Neff, Parameter) assert "Number of effective neutrino species" in cosmo_cls.Neff.__doc__ # validation assert cosmo_cls.Neff.validate(cosmo, 1) == 1 assert cosmo_cls.Neff.validate(cosmo, 10 * u.one) == 10 with pytest.raises(ValueError, match="Neff cannot be negative"): cosmo_cls.Neff.validate(cosmo, -1) # on the instance assert cosmo.Neff is cosmo._Neff assert cosmo.Neff == self.cls_kwargs.get("Neff", 3.04) assert isinstance(cosmo.Neff, float) def test_init_Neff(self, cosmo_cls, ba): """Test initialization for values of ``Neff``.""" # test that it works with units ba.arguments["Neff"] = ba.arguments["Neff"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Neff == ba.arguments["Neff"] # also without units ba.arguments["Neff"] = ba.arguments["Neff"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Neff == ba.arguments["Neff"] ba.arguments["Neff"] = -1 with pytest.raises(ValueError): cosmo_cls(*ba.args, **ba.kwargs) class Parameterm_nuTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` m_nu on a Cosmology. m_nu is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_m_nu(self, cosmo_cls, cosmo): """Test Parameter ``m_nu``.""" # on the class assert isinstance(cosmo_cls.m_nu, Parameter) assert "Mass of neutrino species" in cosmo_cls.m_nu.__doc__ assert cosmo_cls.m_nu.unit == u.eV assert cosmo_cls.m_nu.equivalencies == u.mass_energy() # on the instance # assert cosmo.m_nu is cosmo._m_nu assert u.allclose(cosmo.m_nu, [0.0, 0.0, 0.0] * u.eV) # set differently depending on the other inputs if cosmo.Tnu0.value == 0: assert cosmo.m_nu is None elif not cosmo._massivenu: # only massless assert u.allclose(cosmo.m_nu, 0 * u.eV) elif self._nmasslessnu == 0: # only massive assert cosmo.m_nu == cosmo._massivenu_mass else: # a mix -- the most complicated case assert u.allclose(cosmo.m_nu[: self._nmasslessnu], 0 * u.eV) assert u.allclose(cosmo.m_nu[self._nmasslessnu], cosmo._massivenu_mass) def test_init_m_nu(self, cosmo_cls, ba): """Test initialization for values of ``m_nu``. Note this requires the class to have a property ``has_massive_nu``. """ # Test that it works when m_nu has units. cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert np.all(cosmo.m_nu == ba.arguments["m_nu"]) # (& checks len, unit) assert not cosmo.has_massive_nu assert cosmo.m_nu.unit == u.eV # explicitly check unit once. # And it works when m_nu doesn't have units. ba.arguments["m_nu"] = ba.arguments["m_nu"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert np.all(cosmo.m_nu.value == ba.arguments["m_nu"]) assert not cosmo.has_massive_nu # A negative m_nu raises an exception. tba = copy.copy(ba) tba.arguments["m_nu"] = u.Quantity([-0.3, 0.2, 0.1], u.eV) with pytest.raises(ValueError, match="invalid"): cosmo_cls(*tba.args, **tba.kwargs) def test_init_m_nu_and_Neff(self, cosmo_cls, ba): """Test initialization for values of ``m_nu`` and ``Neff``. Note this test requires ``Neff`` as constructor input, and a property ``has_massive_nu``. """ # Mismatch with Neff = wrong number of neutrinos tba = copy.copy(ba) tba.arguments["Neff"] = 4.05 tba.arguments["m_nu"] = u.Quantity([0.15, 0.2, 0.1], u.eV) with pytest.raises(ValueError, match="unexpected number of neutrino"): cosmo_cls(*tba.args, **tba.kwargs) # No neutrinos, but Neff tba.arguments["m_nu"] = 0 cosmo = cosmo_cls(*tba.args, **tba.kwargs) assert not cosmo.has_massive_nu assert len(cosmo.m_nu) == 4 assert cosmo.m_nu.unit == u.eV assert u.allclose(cosmo.m_nu, 0 * u.eV) # TODO! move this test when create ``test_nu_relative_density`` assert u.allclose( cosmo.nu_relative_density(1.0), 0.22710731766 * 4.05, rtol=1e-6 ) # All massive neutrinos case, len from Neff tba.arguments["m_nu"] = 0.1 * u.eV cosmo = cosmo_cls(*tba.args, **tba.kwargs) assert cosmo.has_massive_nu assert len(cosmo.m_nu) == 4 assert cosmo.m_nu.unit == u.eV assert u.allclose(cosmo.m_nu, [0.1, 0.1, 0.1, 0.1] * u.eV) def test_init_m_nu_override_by_Tcmb0(self, cosmo_cls, ba): """Test initialization for values of ``m_nu``. Note this test requires ``Tcmb0`` as constructor input, and a property ``has_massive_nu``. """ # If Neff = 0, m_nu is None. tba = copy.copy(ba) tba.arguments["Neff"] = 0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.m_nu is None assert not cosmo.has_massive_nu # If Tcmb0 = 0, m_nu is None tba = copy.copy(ba) tba.arguments["Tcmb0"] = 0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.m_nu is None assert not cosmo.has_massive_nu class ParameterOb0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` Ob0 on a Cosmology. Ob0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. 
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_Ob0(self, cosmo_cls, cosmo): """Test Parameter ``Ob0``.""" # on the class assert isinstance(cosmo_cls.Ob0, Parameter) assert "Omega baryon;" in cosmo_cls.Ob0.__doc__ # validation assert cosmo_cls.Ob0.validate(cosmo, None) is None assert cosmo_cls.Ob0.validate(cosmo, 0.1) == 0.1 assert cosmo_cls.Ob0.validate(cosmo, 0.1 * u.one) == 0.1 with pytest.raises(ValueError, match="Ob0 cannot be negative"): cosmo_cls.Ob0.validate(cosmo, -1) with pytest.raises(ValueError, match="baryonic density can not be larger"): cosmo_cls.Ob0.validate(cosmo, cosmo.Om0 + 1) # on the instance assert cosmo.Ob0 is cosmo._Ob0 assert cosmo.Ob0 == 0.03 def test_init_Ob0(self, cosmo_cls, ba): """Test initialization for values of ``Ob0``.""" # test that it works with units assert isinstance(ba.arguments["Ob0"], u.Quantity) cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ob0 == ba.arguments["Ob0"] # also without units ba.arguments["Ob0"] = ba.arguments["Ob0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ob0 == ba.arguments["Ob0"] # Setting param to 0 respects that. Note this test uses ``Ob()``. ba.arguments["Ob0"] = 0.0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ob0 == 0.0 if not self.abstract_w: assert u.allclose(cosmo.Ob(1), 0) assert u.allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0]) # Negative Ob0 errors tba = copy.copy(ba) tba.arguments["Ob0"] = -0.04 with pytest.raises(ValueError, match="Ob0 cannot be negative"): cosmo_cls(*tba.args, **tba.kwargs) # Ob0 > Om0 errors tba.arguments["Ob0"] = tba.arguments["Om0"] + 0.1 with pytest.raises(ValueError, match="baryonic density can not be larger"): cosmo_cls(*tba.args, **tba.kwargs) # No baryons specified means baryon-specific methods fail. tba = copy.copy(ba) tba.arguments.pop("Ob0", None) cosmo = cosmo_cls(*tba.args, **tba.kwargs) with pytest.raises(ValueError): cosmo.Ob(1) # also means DM fraction is undefined with pytest.raises(ValueError): cosmo.Odm(1) # The default value is None assert cosmo_cls._init_signature.parameters["Ob0"].default is None class FLRWTest( CosmologyTest, ParameterH0TestMixin, ParameterOm0TestMixin, ParameterOde0TestMixin, ParameterTcmb0TestMixin, ParameterNeffTestMixin, Parameterm_nuTestMixin, ParameterOb0TestMixin, ): abstract_w = False @abc.abstractmethod def setup_class(self): """Setup for testing.""" super().setup_class(self) # Default cosmology args and kwargs self._cls_args = dict( H0=70 * u.km / u.s / u.Mpc, Om0=0.27 * u.one, Ode0=0.73 * u.one ) self.cls_kwargs = dict( Tcmb0=3.0 * u.K, Ob0=0.03 * u.one, name=self.__class__.__name__, meta={"a": "b"}, ) @pytest.fixture(scope="class") def nonflatcosmo(self): """A non-flat cosmology used in equivalence tests.""" return LambdaCDM(70, 0.4, 0.8) # =============================================================== # Method & Attribute Tests def test_init(self, cosmo_cls): """Test initialization.""" super().test_init(cosmo_cls) # TODO! tests for initializing calculated values, e.g. `h` # TODO! transfer tests for initializing neutrinos def test_init_Tcmb0_zeroing(self, cosmo_cls, ba): """Test if setting Tcmb0 parameter to 0 influences other parameters. 
TODO: consider moving this test to ``FLRWTest`` """ ba.arguments["Tcmb0"] = 0.0 cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ogamma0 == 0.0 assert cosmo.Onu0 == 0.0 if not self.abstract_w: assert u.allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0]) assert u.allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0]) assert u.allclose(cosmo.Onu(1.5), [0, 0, 0, 0]) assert u.allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0]) # --------------------------------------------------------------- # Properties def test_Odm0(self, cosmo_cls, cosmo): """Test property ``Odm0``.""" # on the class assert isinstance(cosmo_cls.Odm0, property) assert cosmo_cls.Odm0.fset is None # immutable # on the instance assert cosmo.Odm0 is cosmo._Odm0 # Odm0 can be None, if Ob0 is None. Otherwise DM = matter - baryons. if cosmo.Ob0 is None: assert cosmo.Odm0 is None else: assert np.allclose(cosmo.Odm0, cosmo.Om0 - cosmo.Ob0) def test_Ok0(self, cosmo_cls, cosmo): """Test property ``Ok0``.""" # on the class assert isinstance(cosmo_cls.Ok0, property) assert cosmo_cls.Ok0.fset is None # immutable # on the instance assert cosmo.Ok0 is cosmo._Ok0 assert np.allclose( cosmo.Ok0, 1.0 - (cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0) ) def test_is_flat(self, cosmo_cls, cosmo): """Test property ``is_flat``.""" # on the class assert isinstance(cosmo_cls.is_flat, property) assert cosmo_cls.is_flat.fset is None # immutable # on the instance assert isinstance(cosmo.is_flat, bool) assert cosmo.is_flat is bool((cosmo.Ok0 == 0.0) and (cosmo.Otot0 == 1.0)) def test_Tnu0(self, cosmo_cls, cosmo): """Test property ``Tnu0``.""" # on the class assert isinstance(cosmo_cls.Tnu0, property) assert cosmo_cls.Tnu0.fset is None # immutable # on the instance assert cosmo.Tnu0 is cosmo._Tnu0 assert cosmo.Tnu0.unit == u.K assert u.allclose(cosmo.Tnu0, 0.7137658555036082 * cosmo.Tcmb0, rtol=1e-5) def test_has_massive_nu(self, cosmo_cls, cosmo): """Test property ``has_massive_nu``.""" # on the class assert isinstance(cosmo_cls.has_massive_nu, property) assert cosmo_cls.has_massive_nu.fset is None # immutable # on the instance if cosmo.Tnu0 == 0: assert cosmo.has_massive_nu is False else: assert cosmo.has_massive_nu is cosmo._massivenu def test_h(self, cosmo_cls, cosmo): """Test property ``h``.""" # on the class assert isinstance(cosmo_cls.h, property) assert cosmo_cls.h.fset is None # immutable # on the instance assert cosmo.h is cosmo._h assert np.allclose(cosmo.h, cosmo.H0.value / 100.0) def test_hubble_time(self, cosmo_cls, cosmo): """Test property ``hubble_time``.""" # on the class assert isinstance(cosmo_cls.hubble_time, property) assert cosmo_cls.hubble_time.fset is None # immutable # on the instance assert cosmo.hubble_time is cosmo._hubble_time assert u.allclose(cosmo.hubble_time, (1 / cosmo.H0) << u.Gyr) def test_hubble_distance(self, cosmo_cls, cosmo): """Test property ``hubble_distance``.""" # on the class assert isinstance(cosmo_cls.hubble_distance, property) assert cosmo_cls.hubble_distance.fset is None # immutable # on the instance assert cosmo.hubble_distance is cosmo._hubble_distance assert cosmo.hubble_distance == (const.c / cosmo._H0).to(u.Mpc) def test_critical_density0(self, cosmo_cls, cosmo): """Test property ``critical_density0``.""" # on the class assert isinstance(cosmo_cls.critical_density0, property) assert cosmo_cls.critical_density0.fset is None # immutable # on the instance assert cosmo.critical_density0 is cosmo._critical_density0 assert cosmo.critical_density0.unit == u.g / u.cm**3 cd0value = _critdens_const * (cosmo.H0.value * 
_H0units_to_invs) ** 2 assert cosmo.critical_density0.value == cd0value def test_Ogamma0(self, cosmo_cls, cosmo): """Test property ``Ogamma0``.""" # on the class assert isinstance(cosmo_cls.Ogamma0, property) assert cosmo_cls.Ogamma0.fset is None # immutable # on the instance assert cosmo.Ogamma0 is cosmo._Ogamma0 # Ogamma cor \propto T^4/rhocrit expect = _a_B_c2 * cosmo.Tcmb0.value**4 / cosmo.critical_density0.value assert np.allclose(cosmo.Ogamma0, expect) # check absolute equality to 0 if Tcmb0 is 0 if cosmo.Tcmb0 == 0: assert cosmo.Ogamma0 == 0 def test_Onu0(self, cosmo_cls, cosmo): """Test property ``Onu0``.""" # on the class assert isinstance(cosmo_cls.Onu0, property) assert cosmo_cls.Onu0.fset is None # immutable # on the instance assert cosmo.Onu0 is cosmo._Onu0 # neutrino temperature <= photon temperature since the neutrinos # decouple first. if cosmo.has_massive_nu: # Tcmb0 > 0 & has massive # check the expected formula assert cosmo.Onu0 == cosmo.Ogamma0 * cosmo.nu_relative_density(0) # a sanity check on on the ratio of neutrinos to photons # technically it could be 1, but not for any of the tested cases. assert cosmo.nu_relative_density(0) <= 1 elif cosmo.Tcmb0 == 0: assert cosmo.Onu0 == 0 else: # check the expected formula assert cosmo.Onu0 == 0.22710731766 * cosmo._Neff * cosmo.Ogamma0 # and check compatibility with nu_relative_density assert np.allclose( cosmo.nu_relative_density(0), 0.22710731766 * cosmo._Neff ) def test_Otot0(self, cosmo): """Test :attr:`astropy.cosmology.FLRW.Otot0`.""" assert ( cosmo.Otot0 == cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ode0 + cosmo.Ok0 ) # --------------------------------------------------------------- # Methods _FLRW_redshift_methods = get_redshift_methods( FLRW, include_private=True, include_z2=False ) @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize("method", _FLRW_redshift_methods) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" with pytest.raises(exc): getattr(cosmo, method)(z) @pytest.mark.parametrize("z", valid_zs) @abc.abstractmethod def test_w(self, cosmo, z): """Test :meth:`astropy.cosmology.FLRW.w`. Since ``w`` is abstract, each test class needs to define further tests. """ # super().test_w(cosmo, z) # NOT b/c abstract `w(z)` w = cosmo.w(z) assert np.shape(w) == np.shape(z) # test same shape assert u.Quantity(w).unit == u.one # test no units or dimensionless # ------------------------------------------- @pytest.mark.parametrize("z", valid_zs) def test_Otot(self, cosmo, z): """Test :meth:`astropy.cosmology.FLRW.Otot`.""" # super().test_Otot(cosmo) # NOT b/c abstract `w(z)` assert np.allclose( cosmo.Otot(z), cosmo.Om(z) + cosmo.Ogamma(z) + cosmo.Onu(z) + cosmo.Ode(z) + cosmo.Ok(z), ) # --------------------------------------------------------------- def test_efunc_vs_invefunc(self, cosmo): """Test that ``efunc`` and ``inv_efunc`` give inverse values. Note that the test doesn't need scipy because it doesn't need to call ``de_density_scale``. 
""" # super().test_efunc_vs_invefunc(cosmo) # NOT b/c abstract `w(z)` z0 = 0.5 z = np.array([0.5, 1.0, 2.0, 5.0]) assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) # --------------------------------------------------------------- # from Cosmology def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # don't change any values kwargs = cosmo._init_arguments.copy() kwargs.pop("name", None) # make sure not setting name kwargs.pop("meta", None) # make sure not setting name c = cosmo.clone(**kwargs) assert c.__class__ == cosmo.__class__ assert c == cosmo # change ``H0`` # Note that H0 affects Ode0 because it changes Ogamma0 c = cosmo.clone(H0=100) assert c.__class__ == cosmo.__class__ assert c.name == cosmo.name + " (modified)" assert c.H0.value == 100 for n in set(cosmo.__parameters__) - {"H0"}: v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose( v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1) ) assert not u.allclose(c.Ogamma0, cosmo.Ogamma0) assert not u.allclose(c.Onu0, cosmo.Onu0) # change multiple things c = cosmo.clone(name="new name", H0=100, Tcmb0=2.8, meta=dict(zz="tops")) assert c.__class__ == cosmo.__class__ assert c.name == "new name" assert c.H0.value == 100 assert c.Tcmb0.value == 2.8 assert c.meta == {**cosmo.meta, **dict(zz="tops")} for n in set(cosmo.__parameters__) - {"H0", "Tcmb0"}: v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose( v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1) ) assert not u.allclose(c.Ogamma0, cosmo.Ogamma0) assert not u.allclose(c.Onu0, cosmo.Onu0) assert not u.allclose(c.Tcmb0.value, cosmo.Tcmb0.value) def test_is_equivalent(self, cosmo): """Test :meth:`astropy.cosmology.FLRW.is_equivalent`.""" super().test_is_equivalent(cosmo) # pass to CosmologyTest # test against a FlatFLRWMixin # case (3) in FLRW.is_equivalent if isinstance(cosmo, FlatLambdaCDM): assert cosmo.is_equivalent(Planck18) assert Planck18.is_equivalent(cosmo) else: assert not cosmo.is_equivalent(Planck18) assert not Planck18.is_equivalent(cosmo) # =============================================================== # Usage Tests # TODO: this test should be subsumed by other tests @pytest.mark.parametrize("method", ("Om", "Ode", "w", "de_density_scale")) def test_distance_broadcast(self, cosmo, method): """Test distance methods broadcast z correctly.""" g = getattr(cosmo, method) z = np.linspace(0.1, 1, 6) z2d = z.reshape(2, 3) z3d = z.reshape(3, 2, 1) value_flat = g(z) assert value_flat.shape == z.shape value_2d = g(z2d) assert value_2d.shape == z2d.shape value_3d = g(z3d) assert value_3d.shape == z3d.shape assert u.allclose(value_flat, value_2d.flatten()) assert u.allclose(value_flat, value_3d.flatten()) @pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.") def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. """ z = np.array([1.0, 2.0, 3.0, 4.0]) cosmo = cosmo_cls(*args, **kwargs) assert u.allclose(cosmo.comoving_distance(z), expected, rtol=1e-4) class TestFLRW(FLRWTest): """Test :class:`astropy.cosmology.FLRW`.""" abstract_w = True def setup_class(self): """ Setup for testing. 
FLRW is abstract, so tests are done on a subclass. """ super().setup_class(self) # make sure SubCosmology is known _COSMOLOGY_CLASSES["SubFLRW"] = SubFLRW self.cls = SubFLRW def teardown_class(self): super().teardown_class(self) _COSMOLOGY_CLASSES.pop("SubFLRW", None) # =============================================================== # Method & Attribute Tests # --------------------------------------------------------------- # Methods def test_w(self, cosmo): """Test abstract :meth:`astropy.cosmology.FLRW.w`.""" with pytest.raises(NotImplementedError, match="not implemented"): cosmo.w(1) def test_Otot(self, cosmo): """Test :meth:`astropy.cosmology.FLRW.Otot`.""" exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError with pytest.raises(exception): assert cosmo.Otot(1) def test_efunc_vs_invefunc(self, cosmo): """ Test that efunc and inv_efunc give inverse values. Here they just fail b/c no ``w(z)`` or no scipy. """ exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError with pytest.raises(exception): cosmo.efunc(0.5) with pytest.raises(exception): cosmo.inv_efunc(0.5) _FLRW_redshift_methods = get_redshift_methods( FLRW, include_private=True, include_z2=False ) - {"w"} @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize("method", _FLRW_redshift_methods) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" with pytest.raises(exc): getattr(cosmo, method)(z) # =============================================================== # Usage Tests @pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.") @pytest.mark.parametrize("method", ("Om", "Ode", "w", "de_density_scale")) def test_distance_broadcast(self, cosmo, method): with pytest.raises(NotImplementedError): super().test_distance_broadcast(cosmo, method) @pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [((70, 0.27, 0.73), {"Tcmb0": 3.0, "Ob0": 0.03}, None)], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): with pytest.raises(NotImplementedError): super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected) # ----------------------------------------------------------------------------- class ParameterFlatOde0TestMixin(ParameterOde0TestMixin): """Tests for `astropy.cosmology.Parameter` Ode0 on a flat Cosmology. This will augment or override some tests in ``ParameterOde0TestMixin``. Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. 
""" def test_Parameter_Ode0(self, cosmo_cls): """Test Parameter ``Ode0`` on the class.""" super().test_Parameter_Ode0(cosmo_cls) assert cosmo_cls.Ode0.derived in (True, np.True_) def test_Ode0(self, cosmo): """Test no-longer-Parameter ``Ode0``.""" assert cosmo.Ode0 is cosmo._Ode0 assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0) def test_init_Ode0(self, cosmo_cls, ba): """Test initialization for values of ``Ode0``.""" cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ok0) # Ode0 is not in the signature with pytest.raises(TypeError, match="Ode0"): cosmo_cls(*ba.args, **ba.kwargs, Ode0=1) class FlatFLRWMixinTest(FlatCosmologyMixinTest, ParameterFlatOde0TestMixin): """Tests for :class:`astropy.cosmology.FlatFLRWMixin` subclasses. E.g to use this class:: class TestFlatSomeFLRW(FlatFLRWMixinTest, TestSomeFLRW): ... """ def setup_class(self): """Setup for testing. Set up as for regular FLRW test class, but remove dark energy component since flat cosmologies are forbidden Ode0 as an argument, see ``test_init_subclass``. """ super().setup_class(self) self._cls_args.pop("Ode0") # =============================================================== # Method & Attribute Tests # --------------------------------------------------------------- # class-level def test_init_subclass(self, cosmo_cls): """Test initializing subclass, mostly that can't have Ode0 in init.""" super().test_init_subclass(cosmo_cls) with pytest.raises(TypeError, match="subclasses of"): class HASOde0SubClass(cosmo_cls): def __init__(self, Ode0): pass _COSMOLOGY_CLASSES.pop(HASOde0SubClass.__qualname__, None) # --------------------------------------------------------------- # instance-level def test_init(self, cosmo_cls): super().test_init(cosmo_cls) cosmo = cosmo_cls(*self.cls_args, **self.cls_kwargs) assert cosmo._Ok0 == 0.0 assert cosmo._Ode0 == 1.0 - ( cosmo._Om0 + cosmo._Ogamma0 + cosmo._Onu0 + cosmo._Ok0 ) def test_Ok0(self, cosmo_cls, cosmo): """Test property ``Ok0``.""" super().test_Ok0(cosmo_cls, cosmo) # for flat cosmologies, Ok0 is not *close* to 0, it *is* 0 assert cosmo.Ok0 == 0.0 def test_Otot0(self, cosmo): """Test :attr:`astropy.cosmology.FLRW.Otot0`. Should always be 1.""" super().test_Otot0(cosmo) # for flat cosmologies, Otot0 is not *close* to 1, it *is* 1 assert cosmo.Otot0 == 1.0 @pytest.mark.parametrize("z", valid_zs) def test_Otot(self, cosmo, z): """Test :meth:`astropy.cosmology.FLRW.Otot`. Should always be 1.""" super().test_Otot(cosmo, z) # for flat cosmologies, Otot is 1, within precision. 
assert u.allclose(cosmo.Otot(z), 1.0) @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize("method", FLRWTest._FLRW_redshift_methods - {"Otot"}) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" super().test_redshift_method_bad_input(cosmo, method, z, exc) # --------------------------------------------------------------- def test_clone_to_nonflat_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_to_nonflat_change_param(cosmo) # change Ode0, without non-flat with pytest.raises(TypeError): cosmo.clone(Ode0=1) # change to non-flat nc = cosmo.clone(to_nonflat=True, Ode0=cosmo.Ode0) assert isinstance(nc, cosmo.__nonflatclass__) assert nc == cosmo.nonflat nc = cosmo.clone(to_nonflat=True, Ode0=1) assert nc.Ode0 == 1.0 assert nc.name == cosmo.name + " (modified)" # --------------------------------------------------------------- def test_is_equivalent(self, cosmo, nonflatcosmo): """Test :meth:`astropy.cosmology.FLRW.is_equivalent`.""" super().test_is_equivalent(cosmo) # pass to TestFLRW # against non-flat Cosmology assert not cosmo.is_equivalent(nonflatcosmo) assert not nonflatcosmo.is_equivalent(cosmo) # non-flat version of class nonflat_cosmo_cls = cosmo.__nonflatclass__ # keys check in `test_is_equivalent_nonflat_class_different_params` # non-flat nonflat = nonflat_cosmo_cls(*self.cls_args, Ode0=0.9, **self.cls_kwargs) assert not nonflat.is_equivalent(cosmo) assert not cosmo.is_equivalent(nonflat) # flat, but not FlatFLRWMixin flat = nonflat_cosmo_cls( *self.cls_args, Ode0=1.0 - cosmo.Om0 - cosmo.Ogamma0 - cosmo.Onu0, **self.cls_kwargs ) flat._Ok0 = 0.0 assert flat.is_equivalent(cosmo) assert cosmo.is_equivalent(flat) def test_repr(self, cosmo_cls, cosmo): """ Test method ``.__repr__()``. Skip non-flat superclass test. e.g. `TestFlatLambdaCDDM` -> `FlatFLRWMixinTest` vs `TestFlatLambdaCDDM` -> `TestLambdaCDDM` -> `FlatFLRWMixinTest` """ FLRWTest.test_repr(self, cosmo_cls, cosmo) # test eliminated Ode0 from parameters assert "Ode0" not in repr(cosmo)
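##############################################################################
# Miscellaneous
# A minimal, standalone sketch of the flat-cosmology closure relation that the
# class-based tests above (``test_init``, ``test_Ok0``, ``test_Otot0``) verify
# through the mixin machinery.  Parameter values are arbitrary examples.


def test_flat_closure_relation_sketch():
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)

    # For flat cosmologies Ok0 is exactly zero and Ode0 is derived, so the
    # density parameters sum to one by construction.
    assert cosmo.Ok0 == 0.0
    assert np.allclose(cosmo.Ode0, 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0))
    assert np.allclose(cosmo.Otot0, 1.0)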
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.lambdacdm`.""" ############################################################################## # IMPORTS import numpy as np import pytest import astropy.units as u from astropy.cosmology import FLRW, wCDM from astropy.utils.compat.optional_deps import HAS_SCIPY ############################################################################## # TYPES class W1(FLRW): """ This class is to test whether the routines work correctly if one only overloads w(z). """ def __init__(self): super().__init__(70.0, 0.27, 0.73, Tcmb0=0.0, name="test_cos") self._w0 = -0.9 def w(self, z): return self._w0 * np.ones_like(z) class W1nu(FLRW): """Similar, but with neutrinos.""" def __init__(self): super().__init__( 70.0, 0.27, 0.73, Tcmb0=3.0, m_nu=0.1 * u.eV, name="test_cos_nu" ) self._w0 = -0.8 def w(self, z): return self._w0 * np.ones_like(z) ############################################################################## # TESTS ############################################################################## @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_de_subclass(): z = [0.2, 0.4, 0.6, 0.9] # This is the comparison object cosmo = wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0) # Values taken from Ned Wrights advanced cosmo calculator, Aug 17 2012 assert u.allclose( cosmo.luminosity_distance(z), [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3 ) # Now try the subclass that only gives w(z) cosmo = W1() assert u.allclose( cosmo.luminosity_distance(z), [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3 ) # Test efunc assert u.allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5) assert u.allclose(cosmo.efunc([0.5, 1.0]), [1.31744953, 1.7489240754], rtol=1e-5) assert u.allclose(cosmo.inv_efunc([0.5, 1.0]), [0.75904236, 0.57178011], rtol=1e-5) # Test de_density_scale assert u.allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4) assert u.allclose( cosmo.de_density_scale([0.5, 1.0]), [1.12934694, 1.23114444], rtol=1e-4 ) @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_efunc_vs_invefunc_flrw(): """Test that efunc and inv_efunc give inverse values""" z0 = 0.5 z = np.array([0.5, 1.0, 2.0, 5.0]) # FLRW is abstract, so requires W1 defined earlier # This requires scipy, unlike the built-ins, because it # calls de_density_scale, which has an integral in it cosmo = W1() assert u.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) assert u.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z)) # Add neutrinos cosmo = W1nu() assert u.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0)) assert u.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
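##############################################################################
# Minimal consistency sketch
# The W1 subclass above only overloads ``w(z)``; as a sanity check it should
# agree with the built-in wCDM not just in derived distances (covered by
# ``test_de_subclass``) but in the equation of state itself.  No scipy is
# needed since ``w(z)`` involves no integration.


def test_w_subclass_matches_wcdm_sketch():
    z = np.array([0.0, 0.5, 1.0, 2.0])

    custom = W1()  # hard-codes w0 = -0.9
    builtin = wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)

    assert u.allclose(custom.w(z), builtin.w(z))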
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.w0wzcdm`.""" ############################################################################## # IMPORTS # THIRD PARTY import numpy as np import pytest # LOCAL import astropy.units as u from astropy.cosmology import w0wzCDM from astropy.cosmology.parameter import Parameter from astropy.cosmology.tests.test_core import ParameterTestMixin from astropy.utils.compat.optional_deps import HAS_SCIPY from .test_base import FLRWTest from .test_w0cdm import Parameterw0TestMixin ############################################################################## # TESTS ############################################################################## class ParameterwzTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` wz on a Cosmology. wz is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_wz(self, cosmo_cls, cosmo): """Test Parameter ``wz``.""" # on the class assert isinstance(cosmo_cls.wz, Parameter) assert "Derivative of the dark energy" in cosmo_cls.wz.__doc__ assert cosmo_cls.wz.unit is None # on the instance assert cosmo.wz is cosmo._wz assert cosmo.wz == self.cls_kwargs["wz"] def test_init_wz(self, cosmo_cls, ba): """Test initialization for values of ``wz``.""" # test that it works with units ba.arguments["wz"] = ba.arguments["wz"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wz == ba.arguments["wz"] # also without units ba.arguments["wz"] = ba.arguments["wz"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wz == ba.arguments["wz"] # must be dimensionless ba.arguments["wz"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class Testw0wzCDM(FLRWTest, Parameterw0TestMixin, ParameterwzTestMixin): """Test :class:`astropy.cosmology.w0wzCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = w0wzCDM self.cls_kwargs.update(w0=-1, wz=0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(w0=0.1, wz=0.2) assert c.w0 == 0.1 assert c.wz == 0.2 for n in set(cosmo.__parameters__) - {"w0", "wz"}: v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose( v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1) ) # @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below def test_w(self, cosmo): """Test :meth:`astropy.cosmology.w0wzCDM.w`.""" # super().test_w(cosmo, z) assert u.allclose(cosmo.w(1.0), -0.5) assert u.allclose( cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]), [-1.0, -0.75, -0.5, -0.25, 0.15] ) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ( 'w0wzCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,' " Ode0=0.73, w0=-1.0, wz=0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] 
eV, Ob0=0.03)" ) assert repr(cosmo) == expected # =============================================================== # Usage Tests @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [ ( # no relativistic species (75.0, 0.3, 0.6), {"w0": -0.9, "wz": 0.1, "Tcmb0": 0.0}, [3051.68786716, 4756.17714818, 5822.38084257, 6562.70873734] * u.Mpc, ), ( # massless neutrinos (75.0, 0.25, 0.5), { "w0": -0.9, "wz": 0.1, "Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV), }, [2997.8115653, 4686.45599916, 5764.54388557, 6524.17408738] * u.Mpc, ), ( # massive neutrinos (75.0, 0.25, 0.5), { "w0": -0.9, "wz": 0.1, "Tcmb0": 3.0, "Neff": 4, "m_nu": u.Quantity(5.0, u.eV), }, [2676.73467639, 3940.57967585, 4686.90810278, 5191.54178243] * u.Mpc, ), ], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. """ super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected) ############################################################################## # Miscellaneous # TODO: these should be better integrated into the new test framework @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_de_densityscale(): cosmo = w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5) z = np.array([0.1, 0.2, 0.5, 1.5, 2.5]) assert u.allclose( cosmo.de_density_scale(z), [0.746048, 0.5635595, 0.25712378, 0.026664129, 0.0035916468], rtol=1e-4, ) assert u.allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7) assert u.allclose( cosmo.de_density_scale([1, 2, 3]), cosmo.de_density_scale([1.0, 2.0, 3.0]), rtol=1e-7, )
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.w0wacdm`.""" ############################################################################## # IMPORTS # THIRD PARTY import numpy as np import pytest # LOCAL import astropy.units as u from astropy.cosmology import Flatw0waCDM, Planck18, w0waCDM from astropy.cosmology.parameter import Parameter from astropy.cosmology.tests.test_core import ParameterTestMixin from astropy.utils.compat.optional_deps import HAS_SCIPY from .test_base import FlatFLRWMixinTest, FLRWTest from .test_w0cdm import Parameterw0TestMixin ############################################################################## # TESTS ############################################################################## class ParameterwaTestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` wa on a Cosmology. wa is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_wa(self, cosmo_cls, cosmo): """Test Parameter ``wa``.""" # on the class assert isinstance(cosmo_cls.wa, Parameter) assert "Negative derivative" in cosmo_cls.wa.__doc__ assert cosmo_cls.wa.unit is None # on the instance assert cosmo.wa is cosmo._wa assert cosmo.wa == self.cls_kwargs["wa"] def test_init_wa(self, cosmo_cls, ba): """Test initialization for values of ``wa``.""" # test that it works with units ba.arguments["wa"] = ba.arguments["wa"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wa == ba.arguments["wa"] # also without units ba.arguments["wa"] = ba.arguments["wa"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.wa == ba.arguments["wa"] # must be dimensionless ba.arguments["wa"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class Testw0waCDM(FLRWTest, Parameterw0TestMixin, ParameterwaTestMixin): """Test :class:`astropy.cosmology.w0waCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = w0waCDM self.cls_kwargs.update(w0=-1, wa=-0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(w0=0.1, wa=0.2) assert c.w0 == 0.1 assert c.wa == 0.2 for n in set(cosmo.__parameters__) - {"w0", "wa"}: v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose( v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1) ) # @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below def test_w(self, cosmo): """Test :meth:`astropy.cosmology.w0waCDM.w`.""" # super().test_w(cosmo, z) assert u.allclose(cosmo.w(1.0), -1.25) assert u.allclose( cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]), [-1, -1.16666667, -1.25, -1.3, -1.34848485], ) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ( 'w0waCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,' " Ode0=0.73, w0=-1.0, wa=-0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] 
eV, Ob0=0.03)" ) assert repr(cosmo) == expected # =============================================================== # Usage Tests @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [ ( # no relativistic species (75.0, 0.3, 0.6), {"w0": -0.9, "wa": 0.1, "Tcmb0": 0.0}, [2937.7807638, 4572.59950903, 5611.52821924, 6339.8549956] * u.Mpc, ), ( # massless neutrinos (75.0, 0.25, 0.5), { "w0": -0.9, "wa": 0.1, "Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV), }, [2907.34722624, 4539.01723198, 5593.51611281, 6342.3228444] * u.Mpc, ), ( # massive neutrinos (75.0, 0.25, 0.5), { "w0": -0.9, "wa": 0.1, "Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(10.0, u.eV), }, [2507.18336722, 3633.33231695, 4292.44746919, 4736.35404638] * u.Mpc, ), ], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. """ super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected) # ----------------------------------------------------------------------------- class TestFlatw0waCDM(FlatFLRWMixinTest, Testw0waCDM): """Test :class:`astropy.cosmology.Flatw0waCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = Flatw0waCDM self.cls_kwargs.update(w0=-1, wa=-0.5) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ( 'Flatw0waCDM(name="ABCMeta", H0=70.0 km / (Mpc s),' " Om0=0.27, w0=-1.0, wa=-0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] eV, Ob0=0.03)" ) assert repr(cosmo) == expected # =============================================================== # Usage Tests @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [ ( # no relativistic species (75.0, 0.25), {"w0": -0.95, "wa": 0.15, "Tcmb0": 0.0}, [3123.29892781, 4956.15204302, 6128.15563818, 6948.26480378] * u.Mpc, ), ( # massless neutrinos (75.0, 0.25), { "w0": -0.95, "wa": 0.15, "Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV), }, [3122.92671907, 4955.03768936, 6126.25719576, 6945.61856513] * u.Mpc, ), ( # massive neutrinos (75.0, 0.25), { "w0": -0.95, "wa": 0.15, "Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(10.0, u.eV), }, [2337.70072701, 3372.13719963, 3988.6571093, 4409.35399673] * u.Mpc, ), ], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. 
""" super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected) ############################################################################## # Comparison to Other Codes @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.") def test_varyde_lumdist_mathematica(): """Tests a few varying dark energy EOS models against a Mathematica computation.""" z = np.array([0.2, 0.4, 0.9, 1.2]) # w0wa models cosmo = w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0) assert u.allclose( cosmo.luminosity_distance(z), [1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4, ) assert u.allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5) assert u.allclose( cosmo.de_density_scale([0.0, 0.5, 1.5]), [1.0, 0.9246310669529021, 0.9184087000251957], ) cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0) assert u.allclose( cosmo.luminosity_distance(z), [971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4, ) cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5, Tcmb0=0.0) assert u.allclose( cosmo.luminosity_distance(z), [974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4, ) ############################################################################## # Miscellaneous # TODO: these should be better integrated into the new test framework def test_equality(): """Test equality and equivalence.""" # mismatched signatures, both directions. newcosmo = w0waCDM(**Planck18._init_arguments, Ode0=0.6) assert newcosmo != Planck18 assert Planck18 != newcosmo @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_de_densityscale(): cosmo = w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5) z = np.array([0.1, 0.2, 0.5, 1.5, 2.5]) assert u.allclose( cosmo.de_density_scale(z), [0.9934201, 0.9767912, 0.897450, 0.622236, 0.4458753], rtol=1e-4, ) assert u.allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7) assert u.allclose( cosmo.de_density_scale([1, 2, 3]), cosmo.de_density_scale([1.0, 2.0, 3.0]), rtol=1e-7, )
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Testing :mod:`astropy.cosmology.flrw.w0cdm`.""" ############################################################################## # IMPORTS # THIRD PARTY import numpy as np import pytest # LOCAL import astropy.units as u from astropy.cosmology import FlatwCDM, wCDM from astropy.cosmology.parameter import Parameter from astropy.cosmology.tests.test_core import ParameterTestMixin, valid_zs from astropy.utils.compat.optional_deps import HAS_SCIPY from .test_base import FlatFLRWMixinTest, FLRWTest ############################################################################## # TESTS ############################################################################## class Parameterw0TestMixin(ParameterTestMixin): """Tests for `astropy.cosmology.Parameter` w0 on a Cosmology. w0 is a descriptor, which are tested by mixin, here with ``TestFLRW``. These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the args and kwargs for the cosmology class, respectively. See ``TestFLRW``. """ def test_w0(self, cosmo_cls, cosmo): """Test Parameter ``w0``.""" # on the class assert isinstance(cosmo_cls.w0, Parameter) assert "Dark energy equation of state" in cosmo_cls.w0.__doc__ assert cosmo_cls.w0.unit is None # on the instance assert cosmo.w0 is cosmo._w0 assert cosmo.w0 == self.cls_kwargs["w0"] def test_init_w0(self, cosmo_cls, ba): """Test initialization for values of ``w0``.""" # test that it works with units ba.arguments["w0"] = ba.arguments["w0"] << u.one # ensure units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.w0 == ba.arguments["w0"] # also without units ba.arguments["w0"] = ba.arguments["w0"].value # strip units cosmo = cosmo_cls(*ba.args, **ba.kwargs) assert cosmo.w0 == ba.arguments["w0"] # must be dimensionless ba.arguments["w0"] = 10 * u.km with pytest.raises(TypeError): cosmo_cls(*ba.args, **ba.kwargs) class TestwCDM(FLRWTest, Parameterw0TestMixin): """Test :class:`astropy.cosmology.wCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = wCDM self.cls_kwargs.update(w0=-0.5) # =============================================================== # Method & Attribute Tests def test_clone_change_param(self, cosmo): """Test method ``.clone()`` changing a(many) Parameter(s).""" super().test_clone_change_param(cosmo) # `w` params c = cosmo.clone(w0=0.1) assert c.w0 == 0.1 for n in set(cosmo.__parameters__) - {"w0"}: v = getattr(c, n) if v is None: assert v is getattr(cosmo, n) else: assert u.allclose( v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1) ) @pytest.mark.parametrize("z", valid_zs) def test_w(self, cosmo, z): """Test :meth:`astropy.cosmology.wCDM.w`.""" super().test_w(cosmo, z) w = cosmo.w(z) assert u.allclose(w, self.cls_kwargs["w0"]) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ( 'wCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,' " Ode0=0.73, w0=-0.5, Tcmb0=3.0 K, Neff=3.04," " m_nu=[0. 0. 0.] 
eV, Ob0=0.03)" ) assert repr(cosmo) == expected # =============================================================== # Usage Tests @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [ ( # no relativistic species (75.0, 0.25, 0.4), {"w0": -0.9, "Tcmb0": 0.0}, [2849.6163356, 4428.71661565, 5450.97862778, 6179.37072324] * u.Mpc, ), ( # massless neutrinos (75.0, 0.25, 0.4), {"w0": -1.1, "Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV)}, [2904.35580229, 4511.11471267, 5543.43643353, 6275.9206788] * u.Mpc, ), ( # massive neutrinos (75.0, 0.25, 0.4), {"w0": -0.9, "Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(10.0, u.eV)}, [2473.32522734, 3581.54519631, 4232.41674426, 4671.83818117] * u.Mpc, ), ], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. """ super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected) # ----------------------------------------------------------------------------- class TestFlatwCDM(FlatFLRWMixinTest, TestwCDM): """Test :class:`astropy.cosmology.FlatwCDM`.""" def setup_class(self): """Setup for testing.""" super().setup_class(self) self.cls = FlatwCDM self.cls_kwargs.update(w0=-0.5) def test_repr(self, cosmo_cls, cosmo): """Test method ``.__repr__()``.""" super().test_repr(cosmo_cls, cosmo) expected = ( 'FlatwCDM(name="ABCMeta", H0=70.0 km / (Mpc s), Om0=0.27,' " w0=-0.5, Tcmb0=3.0 K, Neff=3.04, m_nu=[0. 0. 0.] eV," " Ob0=0.03)" ) assert repr(cosmo) == expected # =============================================================== # Usage Tests @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [ ( # no relativistic species (75.0, 0.25), {"w0": -1.05, "Tcmb0": 0.0}, [3216.8296894, 5117.2097601, 6317.05995437, 7149.68648536] * u.Mpc, ), ( # massless neutrinos (75.0, 0.25), {"w0": -0.95, "Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(0.0, u.eV)}, [3143.56537758, 5000.32196494, 6184.11444601, 7009.80166062] * u.Mpc, ), ( # massive neutrinos (75.0, 0.25), {"w0": -0.9, "Tcmb0": 3.0, "Neff": 3, "m_nu": u.Quantity(10.0, u.eV)}, [2337.76035371, 3372.1971387, 3988.71362289, 4409.40817174] * u.Mpc, ), ], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`. These do not come from external codes -- they are just internal checks to make sure nothing changes if we muck with the distance calculators. """ super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected) ############################################################################## # Miscellaneous # TODO: these should be better integrated into the new test framework @pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy") def test_de_densityscale(): cosmo = wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5) z = np.array([0.1, 0.2, 0.5, 1.5, 2.5]) assert u.allclose( cosmo.de_density_scale(z), [1.15369, 1.31453, 1.83712, 3.95285, 6.5479], rtol=1e-4, ) assert u.allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7) assert u.allclose( cosmo.de_density_scale([1, 2, 3]), cosmo.de_density_scale([1.0, 2.0, 3.0]), rtol=1e-7, )
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Astropy is a package intended to contain core functionality and some common tools needed for performing astronomy and astrophysics research with Python. It also provides an index for other astronomy packages and tools for managing them. """ import sys from pathlib import Path from .version import version as __version__ # The location of the online documentation for astropy # This location will normally point to the current released version of astropy online_docs_root = "https://docs.astropy.org/en/{}/".format( "latest" if "dev" in __version__ else f"v{__version__}" ) from . import config as _config class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy`. """ unicode_output = _config.ConfigItem( False, "When True, use Unicode characters when outputting values, and " "displaying widgets at the console.", ) use_color = _config.ConfigItem( sys.platform != "win32", "When True, use ANSI color escape sequences when writing to the console.", aliases=["astropy.utils.console.USE_COLOR", "astropy.logger.USE_COLOR"], ) max_lines = _config.ConfigItem( None, description=( "Maximum number of lines in the display of pretty-printed " "objects. If not provided, try to determine automatically from the " "terminal size. Negative numbers mean no limit." ), cfgtype="integer(default=None)", aliases=["astropy.table.pprint.max_lines"], ) max_width = _config.ConfigItem( None, description=( "Maximum number of characters per line in the display of " "pretty-printed objects. If not provided, try to determine " "automatically from the terminal size. Negative numbers mean no " "limit." ), cfgtype="integer(default=None)", aliases=["astropy.table.pprint.max_width"], ) conf = Conf() # Define a base ScienceState for configuring constants and units from .utils.state import ScienceState class base_constants_version(ScienceState): """ Base class for the real version-setters below. """ _value = "test" _versions = dict(test="test") @classmethod def validate(cls, value): if value not in cls._versions: raise ValueError(f"Must be one of {list(cls._versions.keys())}") return cls._versions[value] @classmethod def set(cls, value): """ Set the current constants value. """ import sys if "astropy.units" in sys.modules: raise RuntimeError("astropy.units is already imported") if "astropy.constants" in sys.modules: raise RuntimeError("astropy.constants is already imported") return super().set(value) class physical_constants(base_constants_version): """ The version of physical constants to use. """ # Maintainers: update when new constants are added _value = "codata2018" _versions = dict( codata2018="codata2018", codata2014="codata2014", codata2010="codata2010", astropyconst40="codata2018", astropyconst20="codata2014", astropyconst13="codata2010", ) class astronomical_constants(base_constants_version): """ The version of astronomical constants to use. 
""" # Maintainers: update when new constants are added _value = "iau2015" _versions = dict( iau2015="iau2015", iau2012="iau2012", astropyconst40="iau2015", astropyconst20="iau2015", astropyconst13="iau2012", ) # Create the test() function from .tests.runner import TestRunner test = TestRunner.make_test_runner_in(__path__[0]) # if we are *not* in setup mode, import the logger and possibly populate the # configuration file with the defaults def _initialize_astropy(): try: from .utils import _compiler except ImportError: # If this __init__.py file is in ./astropy/ then import is within a source # dir .astropy-root is a file distributed with the source, but that should # not installed if (Path(__file__).parent.parent / ".astropy-root").exists(): raise ImportError( "You appear to be trying to import astropy from " "within a source checkout or from an editable " "installation without building the extension " "modules first. Either run:\n\n" " pip install -e .\n\nor\n\n" " python setup.py build_ext --inplace\n\n" "to make sure the extension modules are built " ) from None # Outright broken installation, just raise standard error raise # Set the bibtex entry to the article referenced in CITATION. def _get_bibtex(): refs = (Path(__file__).parent / "CITATION").read_text().split("@ARTICLE")[1:] return f"@ARTICLE{refs[0]}" if refs else "" __citation__ = __bibtex__ = _get_bibtex() from .logger import _init_log, _teardown_log log = _init_log() _initialize_astropy() from .utils.misc import find_api_page def online_help(query): """ Search the online Astropy documentation for the given query. Opens the results in the default web browser. Requires an active Internet connection. Parameters ---------- query : str The search query. """ import webbrowser from urllib.parse import urlencode url = online_docs_root + f"search.html?{urlencode({'q': query})}" webbrowser.open(url) __dir_inc__ = [ "__version__", "__githash__", "__bibtex__", "test", "log", "find_api_page", "online_help", "online_docs_root", "conf", "physical_constants", "astronomical_constants", ] from types import ModuleType as __module_type__ # Clean up top-level namespace--delete everything that isn't in __dir_inc__ # or is a magic attribute, and that isn't a submodule of this package for varname in dir(): if not ( (varname.startswith("__") and varname.endswith("__")) or varname in __dir_inc__ or ( varname[0] != "_" and isinstance(locals()[varname], __module_type__) and locals()[varname].__name__.startswith(__name__ + ".") ) ): # The last clause in the the above disjunction deserves explanation: # When using relative imports like ``from .. import config``, the # ``config`` variable is automatically created in the namespace of # whatever module ``..`` resolves to (in this case astropy). This # happens a few times just in the module setup above. This allows # the cleanup to keep any public submodules of the astropy package del locals()[varname] del varname, __module_type__
# Licensed under a 3-clause BSD style license - see LICENSE.rst """This module defines a logging class based on the built-in logging module. .. note:: This module is meant for internal ``astropy`` usage. For use in other packages, we recommend implementing your own logger instead. """ import inspect import logging import os import sys import warnings from contextlib import contextmanager from . import conf as _conf from . import config as _config from .utils import find_current_module from .utils.exceptions import AstropyUserWarning, AstropyWarning __all__ = ["Conf", "conf", "log", "AstropyLogger", "LoggingError"] # import the logging levels from logging so that one can do: # log.setLevel(log.DEBUG), for example logging_levels = [ "NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL", "FATAL", ] for level in logging_levels: globals()[level] = getattr(logging, level) __all__ += logging_levels # Initialize by calling _init_log() log = None class LoggingError(Exception): """ This exception is for various errors that occur in the astropy logger, typically when activating or deactivating logger-related features. """ class _AstLogIPYExc(Exception): """ An exception that is used only as a placeholder to indicate to the IPython exception-catching mechanism that the astropy exception-capturing is activated. It should not actually be used as an exception anywhere. """ class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy.logger`. """ log_level = _config.ConfigItem( "INFO", "Threshold for the logging messages. Logging " "messages that are less severe than this level " "will be ignored. The levels are ``'DEBUG'``, " "``'INFO'``, ``'WARNING'``, ``'ERROR'``.", ) log_warnings = _config.ConfigItem(True, "Whether to log `warnings.warn` calls.") log_exceptions = _config.ConfigItem( False, "Whether to log exceptions before raising them." ) log_to_file = _config.ConfigItem( False, "Whether to always log messages to a log file." ) log_file_path = _config.ConfigItem( "", "The file to log messages to. If empty string is given, " "it defaults to a file ``'astropy.log'`` in " "the astropy config directory.", ) log_file_level = _config.ConfigItem( "INFO", "Threshold for logging messages to `log_file_path`." ) log_file_format = _config.ConfigItem( "%(asctime)r, %(origin)r, %(levelname)r, %(message)r", "Format for log file entries.", ) log_file_encoding = _config.ConfigItem( "", "The encoding (e.g., UTF-8) to use for the log file. If empty string " "is given, it defaults to the platform-preferred encoding.", ) conf = Conf() def _init_log(): """Initializes the Astropy log--in most circumstances this is called automatically when importing astropy. """ global log orig_logger_cls = logging.getLoggerClass() logging.setLoggerClass(AstropyLogger) try: log = logging.getLogger("astropy") log._set_defaults() finally: logging.setLoggerClass(orig_logger_cls) return log def _teardown_log(): """Shut down exception and warning logging (if enabled) and clear all Astropy loggers from the logging module's cache. This involves poking some logging module internals, so much if it is 'at your own risk' and is allowed to pass silently if any exceptions occur. """ global log if log.exception_logging_enabled(): log.disable_exception_logging() if log.warnings_logging_enabled(): log.disable_warnings_logging() del log # Now for the fun stuff... 
try: logging._acquireLock() try: loggerDict = logging.Logger.manager.loggerDict for key in loggerDict.keys(): if key == "astropy" or key.startswith("astropy."): del loggerDict[key] finally: logging._releaseLock() except Exception: pass Logger = logging.getLoggerClass() class AstropyLogger(Logger): """ This class is used to set up the Astropy logging. The main functionality added by this class over the built-in logging.Logger class is the ability to keep track of the origin of the messages, the ability to enable logging of warnings.warn calls and exceptions, and the addition of colorized output and context managers to easily capture messages to a file or list. """ def makeRecord( self, name, level, pathname, lineno, msg, args, exc_info, func=None, extra=None, sinfo=None, ): if extra is None: extra = {} if "origin" not in extra: current_module = find_current_module(1, finddiff=[True, "logging"]) if current_module is not None: extra["origin"] = current_module.__name__ else: extra["origin"] = "unknown" return Logger.makeRecord( self, name, level, pathname, lineno, msg, args, exc_info, func=func, extra=extra, sinfo=sinfo, ) _showwarning_orig = None def _showwarning(self, *args, **kwargs): # Bail out if we are not catching a warning from Astropy if not isinstance(args[0], AstropyWarning): return self._showwarning_orig(*args, **kwargs) warning = args[0] # Deliberately not using isinstance here: We want to display # the class name only when it's not the default class, # AstropyWarning. The name of subclasses of AstropyWarning should # be displayed. if type(warning) not in (AstropyWarning, AstropyUserWarning): message = f"{warning.__class__.__name__}: {args[0]}" else: message = str(args[0]) mod_path = args[2] # Now that we have the module's path, we look through sys.modules to # find the module object and thus the fully-package-specified module # name. The module.__file__ is the original source file name. mod_name = None mod_path, ext = os.path.splitext(mod_path) for name, mod in list(sys.modules.items()): try: # Believe it or not this can fail in some cases: # https://github.com/astropy/astropy/issues/2671 path = os.path.splitext(getattr(mod, "__file__", ""))[0] except Exception: continue if path == mod_path: mod_name = mod.__name__ break if mod_name is not None: self.warning(message, extra={"origin": mod_name}) else: self.warning(message) def warnings_logging_enabled(self): return self._showwarning_orig is not None def enable_warnings_logging(self): """ Enable logging of warnings.warn() calls. Once called, any subsequent calls to ``warnings.warn()`` are redirected to this logger and emitted with level ``WARN``. Note that this replaces the output from ``warnings.warn``. This can be disabled with ``disable_warnings_logging``. """ if self.warnings_logging_enabled(): raise LoggingError("Warnings logging has already been enabled") self._showwarning_orig = warnings.showwarning warnings.showwarning = self._showwarning def disable_warnings_logging(self): """ Disable logging of warnings.warn() calls. Once called, any subsequent calls to ``warnings.warn()`` are no longer redirected to this logger. This can be re-enabled with ``enable_warnings_logging``. 
""" if not self.warnings_logging_enabled(): raise LoggingError("Warnings logging has not been enabled") if warnings.showwarning != self._showwarning: raise LoggingError( "Cannot disable warnings logging: " "warnings.showwarning was not set by this " "logger, or has been overridden" ) warnings.showwarning = self._showwarning_orig self._showwarning_orig = None _excepthook_orig = None def _excepthook(self, etype, value, traceback): if traceback is None: mod = None else: tb = traceback while tb.tb_next is not None: tb = tb.tb_next mod = inspect.getmodule(tb) # include the the error type in the message. if len(value.args) > 0: message = f"{etype.__name__}: {str(value)}" else: message = str(etype.__name__) if mod is not None: self.error(message, extra={"origin": mod.__name__}) else: self.error(message) self._excepthook_orig(etype, value, traceback) def exception_logging_enabled(self): """ Determine if the exception-logging mechanism is enabled. Returns ------- exclog : bool True if exception logging is on, False if not. """ try: ip = get_ipython() except NameError: ip = None if ip is None: return self._excepthook_orig is not None else: return _AstLogIPYExc in ip.custom_exceptions def enable_exception_logging(self): """ Enable logging of exceptions. Once called, any uncaught exceptions will be emitted with level ``ERROR`` by this logger, before being raised. This can be disabled with ``disable_exception_logging``. """ try: ip = get_ipython() except NameError: ip = None if self.exception_logging_enabled(): raise LoggingError("Exception logging has already been enabled") if ip is None: # standard python interpreter self._excepthook_orig = sys.excepthook sys.excepthook = self._excepthook else: # IPython has its own way of dealing with excepthook # We need to locally define the function here, because IPython # actually makes this a member function of their own class def ipy_exc_handler(ipyshell, etype, evalue, tb, tb_offset=None): # First use our excepthook self._excepthook(etype, evalue, tb) # Now also do IPython's traceback ipyshell.showtraceback((etype, evalue, tb), tb_offset=tb_offset) # now register the function with IPython # note that we include _AstLogIPYExc so `disable_exception_logging` # knows that it's disabling the right thing ip.set_custom_exc((BaseException, _AstLogIPYExc), ipy_exc_handler) # and set self._excepthook_orig to a no-op self._excepthook_orig = lambda etype, evalue, tb: None def disable_exception_logging(self): """ Disable logging of exceptions. Once called, any uncaught exceptions will no longer be emitted by this logger. This can be re-enabled with ``enable_exception_logging``. """ try: ip = get_ipython() except NameError: ip = None if not self.exception_logging_enabled(): raise LoggingError("Exception logging has not been enabled") if ip is None: # standard python interpreter if sys.excepthook != self._excepthook: raise LoggingError( "Cannot disable exception logging: " "sys.excepthook was not set by this logger, " "or has been overridden" ) sys.excepthook = self._excepthook_orig self._excepthook_orig = None else: # IPython has its own way of dealing with exceptions ip.set_custom_exc(tuple(), None) def enable_color(self): """ Enable colorized output. """ _conf.use_color = True def disable_color(self): """ Disable colorized output. """ _conf.use_color = False @contextmanager def log_to_file(self, filename, filter_level=None, filter_origin=None): """ Context manager to temporarily log messages to a file. Parameters ---------- filename : str The file to log messages to. 
filter_level : str If set, any log messages less important than ``filter_level`` will not be output to the file. Note that this is in addition to the top-level filtering for the logger, so if the logger has level 'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG`` will have no effect, since these messages are already filtered out. filter_origin : str If set, only log messages with an origin starting with ``filter_origin`` will be output to the file. Notes ----- By default, the logger already outputs log messages to a file set in the Astropy configuration file. Using this context manager does not stop log messages from being output to that file, nor does it stop log messages from being printed to standard output. Examples -------- The context manager is used as:: with logger.log_to_file('myfile.log'): # your code here """ encoding = conf.log_file_encoding if conf.log_file_encoding else None fh = logging.FileHandler(filename, encoding=encoding) if filter_level is not None: fh.setLevel(filter_level) if filter_origin is not None: fh.addFilter(FilterOrigin(filter_origin)) f = logging.Formatter(conf.log_file_format) fh.setFormatter(f) self.addHandler(fh) yield fh.close() self.removeHandler(fh) @contextmanager def log_to_list(self, filter_level=None, filter_origin=None): """ Context manager to temporarily log messages to a list. Parameters ---------- filename : str The file to log messages to. filter_level : str If set, any log messages less important than ``filter_level`` will not be output to the file. Note that this is in addition to the top-level filtering for the logger, so if the logger has level 'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG`` will have no effect, since these messages are already filtered out. filter_origin : str If set, only log messages with an origin starting with ``filter_origin`` will be output to the file. Notes ----- Using this context manager does not stop log messages from being output to standard output. Examples -------- The context manager is used as:: with logger.log_to_list() as log_list: # your code here """ lh = ListHandler() if filter_level is not None: lh.setLevel(filter_level) if filter_origin is not None: lh.addFilter(FilterOrigin(filter_origin)) self.addHandler(lh) yield lh.log_list self.removeHandler(lh) def _set_defaults(self): """ Reset logger to its initial state. """ # Reset any previously installed hooks if self.warnings_logging_enabled(): self.disable_warnings_logging() if self.exception_logging_enabled(): self.disable_exception_logging() # Remove all previous handlers for handler in self.handlers[:]: self.removeHandler(handler) # Set levels self.setLevel(conf.log_level) # Set up the stdout handler sh = StreamHandler() self.addHandler(sh) # Set up the main log file handler if requested (but this might fail if # configuration directory or log file is not writeable). 
if conf.log_to_file: log_file_path = conf.log_file_path # "None" as a string because it comes from config try: _ASTROPY_TEST_ testing_mode = True except NameError: testing_mode = False try: if log_file_path == "" or testing_mode: log_file_path = os.path.join( _config.get_config_dir("astropy"), "astropy.log" ) else: log_file_path = os.path.expanduser(log_file_path) encoding = conf.log_file_encoding if conf.log_file_encoding else None fh = logging.FileHandler(log_file_path, encoding=encoding) except OSError as e: warnings.warn( f"log file {log_file_path!r} could not be opened for writing:" f" {str(e)}", RuntimeWarning, ) else: formatter = logging.Formatter(conf.log_file_format) fh.setFormatter(formatter) fh.setLevel(conf.log_file_level) self.addHandler(fh) if conf.log_warnings: self.enable_warnings_logging() if conf.log_exceptions: self.enable_exception_logging() class StreamHandler(logging.StreamHandler): """ A specialized StreamHandler that logs INFO and DEBUG messages to stdout, and all other messages to stderr. Also provides coloring of the output, if enabled in the parent logger. """ def emit(self, record): """ The formatter for stderr. """ if record.levelno <= logging.INFO: stream = sys.stdout else: stream = sys.stderr if record.levelno < logging.DEBUG or not _conf.use_color: print(record.levelname, end="", file=stream) else: # Import utils.console only if necessary and at the latest because # the import takes a significant time [#4649] from .utils.console import color_print if record.levelno < logging.INFO: color_print(record.levelname, "magenta", end="", file=stream) elif record.levelno < logging.WARN: color_print(record.levelname, "green", end="", file=stream) elif record.levelno < logging.ERROR: color_print(record.levelname, "brown", end="", file=stream) else: color_print(record.levelname, "red", end="", file=stream) record.message = f"{record.msg} [{record.origin:s}]" print(": " + record.message, file=stream) class FilterOrigin: """A filter for the record origin.""" def __init__(self, origin): self.origin = origin def filter(self, record): return record.origin.startswith(self.origin) class ListHandler(logging.Handler): """A handler that can be used to capture the records in a list.""" def __init__(self, filter_level=None, filter_origin=None): logging.Handler.__init__(self) self.log_list = [] def emit(self, record): self.log_list.append(record)
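# -----------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of this module): the context
# managers defined on ``AstropyLogger`` above can temporarily capture log
# messages in a list or mirror them to a file.  The file name below is purely
# illustrative.

if __name__ == "__main__":
    from astropy import log

    # Capture records in a list (in addition to the normal console output).
    with log.log_to_list() as log_list:
        log.info("captured to a list")
    print([record.msg for record in log_list])

    # Also write messages to a file for the duration of the block.
    with log.log_to_file("example_astropy.log"):
        log.warning("also written to example_astropy.log")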
# Licensed under a 3-clause BSD style license - see LICENSE.rst # # Astropy documentation build configuration file. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this file. # # All configuration values have a default. Some values are defined in # the global Astropy configuration which is loaded here before anything else. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('..')) # IMPORTANT: the above commented section was generated by sphinx-quickstart, but # is *NOT* appropriate for astropy or Astropy affiliated packages. It is left # commented out with this explanation to make it clear why this should not be # done. If the sys.path entry above is added, when the astropy.sphinx.conf # import occurs, it will import the *source* version of astropy instead of the # version installed (if invoked as "make html" or directly with sphinx), or the # version in the build directory. # Thus, any C-extensions that are needed to build the documentation will *not* # be accessible, and the documentation will not build correctly. # See sphinx_astropy.conf for which values are set there. import configparser import doctest import os import sys from datetime import datetime from importlib import metadata from packaging.requirements import Requirement from packaging.specifiers import SpecifierSet # -- Check for missing dependencies ------------------------------------------- missing_requirements = {} for line in metadata.requires("astropy"): if 'extra == "docs"' in line: req = Requirement(line.split(";")[0]) req_package = req.name.lower() req_specifier = str(req.specifier) try: version = metadata.version(req_package) except metadata.PackageNotFoundError: missing_requirements[req_package] = req_specifier if version not in SpecifierSet(req_specifier, prereleases=True): missing_requirements[req_package] = req_specifier if missing_requirements: print( "The following packages could not be found and are required to " "build the documentation:" ) for key, val in missing_requirements.items(): print(f" * {key} {val}") print('Please install the "docs" requirements.') sys.exit(1) from sphinx_astropy.conf.v1 import * # noqa: E402 from sphinx_astropy.conf.v1 import ( # noqa: E402 exclude_patterns, extensions, intersphinx_mapping, numpydoc_xref_aliases, numpydoc_xref_astropy_aliases, numpydoc_xref_ignore, rst_epilog, ) # -- Plot configuration ------------------------------------------------------- plot_rcparams = { "axes.labelsize": "large", "figure.figsize": (6, 6), "figure.subplot.hspace": 0.5, "savefig.bbox": "tight", "savefig.facecolor": "none", } plot_apply_rcparams = True plot_html_show_source_link = False plot_formats = ["png", "svg", "pdf"] # Don't use the default - which includes a numpy and matplotlib import plot_pre_code = "" # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = "3.0" # The intersphinx_mapping in sphinx_astropy.sphinx refers to astropy for # the benefit of other packages who want to refer to objects in the # astropy core. However, we don't want to cyclically reference astropy in its # own build so we remove it here. 
del intersphinx_mapping["astropy"] # add any custom intersphinx for astropy intersphinx_mapping.update( { "astropy-dev": ("https://docs.astropy.org/en/latest/", None), "pyerfa": ("https://pyerfa.readthedocs.io/en/stable/", None), "pytest": ("https://docs.pytest.org/en/stable/", None), "ipython": ("https://ipython.readthedocs.io/en/stable/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), "sphinx_automodapi": ( "https://sphinx-automodapi.readthedocs.io/en/stable/", None, ), "packagetemplate": ( "https://docs.astropy.org/projects/package-template/en/latest/", None, ), "asdf-astropy": ("https://asdf-astropy.readthedocs.io/en/latest/", None), "fsspec": ("https://filesystem-spec.readthedocs.io/en/latest/", None), } ) # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # .inc.rst mean *include* files, don't have sphinx process them exclude_patterns += ["_templates", "changes", "_pkgtemplate.rst", "**/*.inc.rst"] # Add any paths that contain templates here, relative to this directory. if "templates_path" not in locals(): # in case parent conf.py defines it templates_path = [] templates_path.append("_templates") extensions += ["sphinx_changelog"] # Grab minversion from setup.cfg setup_cfg = configparser.ConfigParser() setup_cfg.read(os.path.join(os.path.pardir, "setup.cfg")) __minimum_python_version__ = setup_cfg["options"]["python_requires"].replace(">=", "") min_versions = {} for line in metadata.requires("astropy"): req = Requirement(line.split(";")[0]) min_versions[req.name.lower()] = str(req.specifier) # This is added to the end of RST files - a good place to put substitutions to # be used globally. with open("common_links.txt") as cl: rst_epilog += cl.read().format( minimum_python=__minimum_python_version__, **min_versions ) # Manually register doctest options since matplotlib 3.5 messed up allowing them # from pytest-doctestplus IGNORE_OUTPUT = doctest.register_optionflag("IGNORE_OUTPUT") REMOTE_DATA = doctest.register_optionflag("REMOTE_DATA") FLOAT_CMP = doctest.register_optionflag("FLOAT_CMP") # Whether to create cross-references for the parameter types in the # Parameters, Other Parameters, Returns and Yields sections of the docstring. numpydoc_xref_param_type = True # Words not to cross-reference. Most likely, these are common words used in # parameter type descriptions that may be confused for classes of the same # name. The base set comes from sphinx-astropy. We add more here. numpydoc_xref_ignore.update( { "mixin", "Any", # aka something that would be annotated with `typing.Any` # needed in subclassing numpy # TODO! revisit "Arguments", "Path", # TODO! not need to ignore. "flag", "bits", } ) # Mappings to fully qualified paths (or correct ReST references) for the # aliases/shortcuts used when specifying the types of parameters. # Numpy provides some defaults # https://github.com/numpy/numpydoc/blob/b352cd7635f2ea7748722f410a31f937d92545cc/numpydoc/xref.py#L62-L94 # and a base set comes from sphinx-astropy. 
# so here we mostly need to define Astropy-specific x-refs numpydoc_xref_aliases.update( { # python & adjacent "Any": "`~typing.Any`", "file-like": ":term:`python:file-like object`", "file": ":term:`python:file object`", "path-like": ":term:`python:path-like object`", "module": ":term:`python:module`", "buffer-like": ":term:buffer-like", "hashable": ":term:`python:hashable`", # for matplotlib "color": ":term:`color`", # for numpy "ints": ":class:`python:int`", # for astropy "number": ":term:`number`", "Representation": ":class:`~astropy.coordinates.BaseRepresentation`", "writable": ":term:`writable file-like object`", "readable": ":term:`readable file-like object`", "BaseHDU": ":doc:`HDU </io/fits/api/hdus>`", } ) # Add from sphinx-astropy 1) glossary aliases 2) physical types. numpydoc_xref_aliases.update(numpydoc_xref_astropy_aliases) # Turn off table of contents entries for functions and classes toc_object_entries = False # -- Project information ------------------------------------------------------ project = "Astropy" author = "The Astropy Developers" copyright = f"2011–{datetime.utcnow().year}, " + author # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # The full version, including alpha/beta/rc tags. release = metadata.version(project) # The short X.Y version. version = ".".join(release.split(".")[:2]) # Only include dev docs in dev version. dev = "dev" in release if not dev: exclude_patterns += ["development/*", "testhelpers.rst"] # -- Options for the module index --------------------------------------------- modindex_common_prefix = ["astropy."] # -- Options for HTML output --------------------------------------------------- # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = f"{project} v{release}" # Output file base name for HTML help builder. htmlhelp_basename = project + "doc" # A dictionary of values to pass into the template engine’s context for all pages. html_context = {"to_be_indexed": ["stable", "latest"], "is_development": dev} # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. html_extra_path = ["robots.txt"] # -- Options for LaTeX output -------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ("index", project + ".tex", project + " Documentation", author, "manual") ] latex_logo = "_static/astropy_logo.pdf" # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [("index", project.lower(), project + " Documentation", [author], 1)] # Setting this URL is requited by sphinx-astropy github_issues_url = "https://github.com/astropy/astropy/issues/" edit_on_github_branch = "main" # Enable nitpicky mode - which ensures that all references in the docs # resolve. nitpicky = True # See docs/nitpick-exceptions file for the actual listing. 
nitpick_ignore = [] for line in open("nitpick-exceptions"): if line.strip() == "" or line.startswith("#"): continue dtype, target = line.split(None, 1) nitpick_ignore.append((dtype, target.strip())) # -- Options for the Sphinx gallery ------------------------------------------- try: import warnings import sphinx_gallery extensions += ["sphinx_gallery.gen_gallery"] sphinx_gallery_conf = { "backreferences_dir": "generated/modules", # path to store the module using example template "filename_pattern": "^((?!skip_).)*$", # execute all examples except those that start with "skip_" "examples_dirs": f"..{os.sep}examples", # path to the examples scripts "gallery_dirs": "generated/examples", # path to save gallery generated examples "reference_url": { "astropy": None, "matplotlib": "https://matplotlib.org/stable/", "numpy": "https://numpy.org/doc/stable/", }, "abort_on_example_error": True, } # Filter out backend-related warnings as described in # https://github.com/sphinx-gallery/sphinx-gallery/pull/564 warnings.filterwarnings( "ignore", category=UserWarning, message=( "Matplotlib is currently using agg, which is a" " non-GUI backend, so cannot show the figure." ), ) except ImportError: sphinx_gallery = None # -- Options for linkcheck output ------------------------------------------- linkcheck_retry = 5 linkcheck_ignore = [ "https://journals.aas.org/manuscript-preparation/", "https://maia.usno.navy.mil/", "https://www.usno.navy.mil/USNO/time/gps/usno-gps-time-transfer", "https://aa.usno.navy.mil/publications/docs/Circular_179.php", "http://data.astropy.org", "https://doi.org/", # CI blocked by service provider "https://ui.adsabs.harvard.edu", # CI blocked by service provider "https://www.tandfonline.com/", # 403 Client Error: Forbidden "https://physics.nist.gov/", # SSL: CERTIFICATE_VERIFY_FAILED "https://pyfits.readthedocs.io/en/v3.2.1/", # defunct page in CHANGES.rst r"https://github\.com/astropy/astropy/(?:issues|pull)/\d+", ] linkcheck_timeout = 180 linkcheck_anchors = False def rstjinja(app, docname, source): """Render pages as a jinja template to hide/show dev docs.""" # Make sure we're outputting HTML if app.builder.format != "html": return files_to_render = ["index", "install"] if docname in files_to_render: print(f"Jinja rendering {docname}") rendered = app.builder.templates.render_string( source[0], app.config.html_context ) source[0] = rendered def resolve_astropy_and_dev_reference(app, env, node, contnode): """ Reference targets for ``astropy:`` and ``astropy-dev:`` are special cases. Documentation links in astropy can be set up as intersphinx links so that affiliate packages do not have to override the docstrings when building the docs. If we are building the development docs it is a local ref targeting the label ``astropy-dev:<label>``, but for stable docs it should be an intersphinx resolution to the development docs. See https://github.com/astropy/astropy/issues/11366 """ # should the node be processed? reftarget = node.get("reftarget") # str or None if str(reftarget).startswith("astropy:"): # This allows Astropy to use intersphinx links to itself and have # them resolve to local links. Downstream packages will see intersphinx. # TODO! deprecate this if sphinx-doc/sphinx/issues/9169 is implemented. 
process, replace = True, "astropy:" elif dev and str(reftarget).startswith("astropy-dev:"): process, replace = True, "astropy-dev:" else: process, replace = False, "" # make link local if process: reftype = node.get("reftype") refdoc = node.get("refdoc", app.env.docname) # convert astropy intersphinx targets to local links. # there are a few types of intersphinx link patterns, as described in # https://docs.readthedocs.io/en/stable/guides/intersphinx.html reftarget = reftarget.replace(replace, "") if reftype == "doc": # also need to replace the doc link node.replace_attr("reftarget", reftarget) # Delegate to the ref node's original domain/target (typically :ref:) try: domain = app.env.domains[node["refdomain"]] return domain.resolve_xref( app.env, refdoc, app.builder, reftype, reftarget, node, contnode ) except Exception: pass # Otherwise return None which should delegate to intersphinx def setup(app): if sphinx_gallery is None: msg = ( "The sphinx_gallery extension is not installed, so the " "gallery will not be built. You will probably see " "additional warnings about undefined references due " "to this." ) try: app.warn(msg) except AttributeError: # Sphinx 1.6+ from sphinx.util import logging logger = logging.getLogger(__name__) logger.warning(msg) # Generate the page from Jinja template app.connect("source-read", rstjinja) # Set this to higher priority than intersphinx; this way when building # dev docs astropy-dev: targets will go to the local docs instead of the # intersphinx mapping app.connect("missing-reference", resolve_astropy_and_dev_reference, priority=400)
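# -----------------------------------------------------------------------------
# Editorial sketch (not part of the original conf.py): the ``nitpick-exceptions``
# file parsed above is expected to hold one "<reference-type> <target>" pair per
# line, with blank lines and ``#`` comments skipped.  The entries below are
# hypothetical; the snippet only demonstrates the parsing logic used above.

if __name__ == "__main__":
    sample = (
        "# hypothetical entries for references Sphinx cannot resolve\n"
        "py:class numpy.ma.core.MaskedArray\n"
        "\n"
        "py:obj some.private.Helper\n"
    )
    ignore = []
    for line in sample.splitlines():
        if line.strip() == "" or line.startswith("#"):
            continue
        dtype, target = line.split(None, 1)
        ignore.append((dtype, target.strip()))
    print(ignore)
    # [('py:class', 'numpy.ma.core.MaskedArray'), ('py:obj', 'some.private.Helper')]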
import os import shutil import sys import erfa # noqa: F401 import matplotlib import pytest import astropy # noqa: F401 if len(sys.argv) == 3 and sys.argv[1] == "--astropy-root": ROOT = sys.argv[2] else: # Make sure we don't allow any arguments to be passed - some tests call # sys.executable which becomes this script when producing a pyinstaller # bundle, but we should just error in this case since this is not the # regular Python interpreter. if len(sys.argv) > 1: print("Extra arguments passed, exiting early") sys.exit(1) for root, dirnames, files in os.walk(os.path.join(ROOT, "astropy")): # NOTE: we can't simply use # test_root = root.replace('astropy', 'astropy_tests') # as we only want to change the one which is for the module, so instead # we search for the last occurrence and replace that. pos = root.rfind("astropy") test_root = root[:pos] + "astropy_tests" + root[pos + 7 :] # Copy over the astropy 'tests' directories and their contents for dirname in dirnames: final_dir = os.path.relpath(os.path.join(test_root, dirname), ROOT) # We only copy over 'tests' directories, but not astropy/tests (only # astropy/tests/tests) since that is not just a directory with tests. if dirname == "tests" and not root.endswith("astropy"): shutil.copytree(os.path.join(root, dirname), final_dir, dirs_exist_ok=True) else: # Create empty __init__.py files so that 'astropy_tests' still # behaves like a single package, otherwise pytest gets confused # by the different conftest.py files. init_filename = os.path.join(final_dir, "__init__.py") if not os.path.exists(os.path.join(final_dir, "__init__.py")): os.makedirs(final_dir, exist_ok=True) with open(os.path.join(final_dir, "__init__.py"), "w") as f: f.write("#") # Copy over all conftest.py files for file in files: if file == "conftest.py": final_file = os.path.relpath(os.path.join(test_root, file), ROOT) shutil.copy2(os.path.join(root, file), final_file) # Add the top-level __init__.py file with open(os.path.join("astropy_tests", "__init__.py"), "w") as f: f.write("#") # Remove test file that tries to import all sub-packages at collection time os.remove( os.path.join("astropy_tests", "utils", "iers", "tests", "test_leap_second.py") ) # Remove convolution tests for now as there are issues with the loading of the C extension. # FIXME: one way to fix this would be to migrate the convolution C extension away from using # ctypes and using the regular extension mechanism instead. shutil.rmtree(os.path.join("astropy_tests", "convolution")) os.remove(os.path.join("astropy_tests", "modeling", "tests", "test_convolution.py")) os.remove(os.path.join("astropy_tests", "modeling", "tests", "test_core.py")) os.remove(os.path.join("astropy_tests", "visualization", "tests", "test_lupton_rgb.py")) # FIXME: PIL minversion check does not work os.remove( os.path.join("astropy_tests", "visualization", "wcsaxes", "tests", "test_misc.py") ) os.remove( os.path.join("astropy_tests", "visualization", "wcsaxes", "tests", "test_wcsapi.py") ) # FIXME: The following tests rely on the fully qualified name of classes which # don't seem to be the same. 
os.remove(os.path.join("astropy_tests", "table", "mixins", "tests", "test_registry.py")) # Copy the top-level conftest.py shutil.copy2( os.path.join(ROOT, "astropy", "conftest.py"), os.path.join("astropy_tests", "conftest.py"), ) # matplotlib hook in pyinstaller 5.0 and later no longer collects every backend, see # https://github.com/pyinstaller/pyinstaller/issues/6760 matplotlib.use("svg") # We skip a few tests, which are generally ones that rely on explicitly # checking the name of the current module (which ends up starting with # astropy_tests rather than astropy). SKIP_TESTS = [ "test_exception_logging_origin", "test_log", "test_configitem", "test_config_noastropy_fallback", "test_no_home", "test_path", "test_rename_path", "test_data_name_third_party_package", "test_pkg_finder", "test_wcsapi_extension", "test_find_current_module_bundle", "test_minversion", "test_imports", "test_generate_config", "test_generate_config2", "test_create_config_file", "test_download_parallel_fills_cache", ] # Run the tests! sys.exit( pytest.main( ["astropy_tests", "-k " + " and ".join("not " + test for test in SKIP_TESTS)], plugins=[ "pytest_astropy.plugin", "pytest_doctestplus.plugin", "pytest_remotedata.plugin", "pytest_astropy_header.display", ], ) )
""" =================================================================== Determining and plotting the altitude/azimuth of a celestial object =================================================================== This example demonstrates coordinate transformations and the creation of visibility curves to assist with observing run planning. In this example, we make a `~astropy.coordinates.SkyCoord` instance for M33. The altitude-azimuth coordinates are then found using `astropy.coordinates.EarthLocation` and `astropy.time.Time` objects. This example is meant to demonstrate the capabilities of the `astropy.coordinates` package. For more convenient and/or complex observation planning, consider the `astroplan <https://astroplan.readthedocs.org/>`_ package. *By: Erik Tollerud, Kelle Cruz* *License: BSD* """ ############################################################################## # Let's suppose you are planning to visit picturesque Bear Mountain State Park # in New York, USA. You're bringing your telescope with you (of course), and # someone told you M33 is a great target to observe there. You happen to know # you're free at 11:00 pm local time, and you want to know if it will be up. # Astropy can answer that. # # Import numpy and matplotlib. For the latter, use a nicer set of plot # parameters and set up support for plotting/converting quantities. import matplotlib.pyplot as plt import numpy as np from astropy.visualization import astropy_mpl_style, quantity_support plt.style.use(astropy_mpl_style) quantity_support() ############################################################################## # Import the packages necessary for finding coordinates and making # coordinate transformations import astropy.units as u from astropy.coordinates import AltAz, EarthLocation, SkyCoord from astropy.time import Time ############################################################################## # `astropy.coordinates.SkyCoord.from_name` uses Simbad to resolve object # names and retrieve coordinates. # # Get the coordinates of M33: m33 = SkyCoord.from_name('M33') ############################################################################## # Use `astropy.coordinates.EarthLocation` to provide the location of Bear # Mountain and set the time to 11pm EDT on 2012 July 12: bear_mountain = EarthLocation(lat=41.3*u.deg, lon=-74*u.deg, height=390*u.m) utcoffset = -4*u.hour # Eastern Daylight Time time = Time('2012-7-12 23:00:00') - utcoffset ############################################################################## # `astropy.coordinates.EarthLocation.get_site_names` and # `~astropy.coordinates.EarthLocation.get_site_names` can be used to get # locations of major observatories. # # Use `astropy.coordinates` to find the Alt, Az coordinates of M33 at as # observed from Bear Mountain at 11pm on 2012 July 12. m33altaz = m33.transform_to(AltAz(obstime=time,location=bear_mountain)) print(f"M33's Altitude = {m33altaz.alt:.2}") ############################################################################## # This is helpful since it turns out M33 is barely above the horizon at this # time. It's more informative to find M33's airmass over the course of # the night. 
# # Find the alt,az coordinates of M33 at 100 times evenly spaced between 10pm # and 7am EDT: midnight = Time('2012-7-13 00:00:00') - utcoffset delta_midnight = np.linspace(-2, 10, 100)*u.hour frame_July13night = AltAz(obstime=midnight+delta_midnight, location=bear_mountain) m33altazs_July13night = m33.transform_to(frame_July13night) ############################################################################## # convert alt, az to airmass with `~astropy.coordinates.AltAz.secz` attribute: m33airmasss_July13night = m33altazs_July13night.secz ############################################################################## # Plot the airmass as a function of time: plt.plot(delta_midnight, m33airmasss_July13night) plt.xlim(-2, 10) plt.ylim(1, 4) plt.xlabel('Hours from EDT Midnight') plt.ylabel('Airmass [Sec(z)]') plt.show() ############################################################################## # Use `~astropy.coordinates.get_sun` to find the location of the Sun at 1000 # evenly spaced times between noon on July 12 and noon on July 13: from astropy.coordinates import get_sun delta_midnight = np.linspace(-12, 12, 1000)*u.hour times_July12_to_13 = midnight + delta_midnight frame_July12_to_13 = AltAz(obstime=times_July12_to_13, location=bear_mountain) sunaltazs_July12_to_13 = get_sun(times_July12_to_13).transform_to(frame_July12_to_13) ############################################################################## # Do the same with `~astropy.coordinates.get_body` to find when the moon is # up. Be aware that this will need to download a 10MB file from the internet # to get a precise location of the moon. from astropy.coordinates import get_body moon_July12_to_13 = get_body("moon", times_July12_to_13) moonaltazs_July12_to_13 = moon_July12_to_13.transform_to(frame_July12_to_13) ############################################################################## # Find the alt,az coordinates of M33 at those same times: m33altazs_July12_to_13 = m33.transform_to(frame_July12_to_13) ############################################################################## # Make a beautiful figure illustrating nighttime and the altitudes of M33 and # the Sun over that time: plt.plot(delta_midnight, sunaltazs_July12_to_13.alt, color='r', label='Sun') plt.plot(delta_midnight, moonaltazs_July12_to_13.alt, color=[0.75]*3, ls='--', label='Moon') plt.scatter(delta_midnight, m33altazs_July12_to_13.alt, c=m33altazs_July12_to_13.az, label='M33', lw=0, s=8, cmap='viridis') plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg, sunaltazs_July12_to_13.alt < -0*u.deg, color='0.5', zorder=0) plt.fill_between(delta_midnight, 0*u.deg, 90*u.deg, sunaltazs_July12_to_13.alt < -18*u.deg, color='k', zorder=0) plt.colorbar().set_label('Azimuth [deg]') plt.legend(loc='upper left') plt.xlim(-12*u.hour, 12*u.hour) plt.xticks((np.arange(13)*2-12)*u.hour) plt.ylim(0*u.deg, 90*u.deg) plt.xlabel('Hours from EDT Midnight') plt.ylabel('Altitude [deg]') plt.show()
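##############################################################################
# Editorial sketch, not part of the original example: with the arrays computed
# above one can also read off when M33 is conveniently observable, e.g. the
# hours relative to midnight at which it is above the horizon at airmass < 2
# during astronomical night (Sun below -18 deg):

observable = (
    (m33altazs_July12_to_13.alt > 0*u.deg)
    & (m33altazs_July12_to_13.secz < 2)
    & (sunaltazs_July12_to_13.alt < -18*u.deg)
)
if observable.any():
    print(f"M33 at airmass < 2 from {delta_midnight[observable].min():.1f} "
          f"to {delta_midnight[observable].max():.1f} relative to midnight")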
r""" ========================================================== Create a new coordinate class (for the Sagittarius stream) ========================================================== This document describes in detail how to subclass and define a custom spherical coordinate frame, as discussed in :ref:`astropy:astropy-coordinates-design` and the docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example, we will define a coordinate system defined by the plane of orbit of the Sagittarius Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003). The Sgr coordinate system is often referred to in terms of two angular coordinates, :math:`\Lambda,B`. To do this, we need to define a subclass of `~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the coordinate system angles in each of the supported representations. In this case we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and "Beta". Then we have to define the transformation from this coordinate system to some other built-in system. Here we will use Galactic coordinates, represented by the `~astropy.coordinates.Galactic` class. See Also -------- * The `gala package <http://gala.adrian.pw/>`_, which defines a number of Astropy coordinate frames for stellar stream coordinate systems. * Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms", https://arxiv.org/abs/astro-ph/0304198 * Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132 * David Law's Sgr info page https://www.stsci.edu/~dlaw/Sgr/ *By: Adrian Price-Whelan, Erik Tollerud* *License: BSD* """ ############################################################################## # Make `print` work the same in all versions of Python, set up numpy, # matplotlib, and use a nicer set of plot parameters: import matplotlib.pyplot as plt import numpy as np from astropy.visualization import astropy_mpl_style plt.style.use(astropy_mpl_style) ############################################################################## # Import the packages necessary for coordinates import astropy.coordinates as coord import astropy.units as u from astropy.coordinates import frame_transform_graph from astropy.coordinates.matrix_utilities import matrix_transpose, rotation_matrix ############################################################################## # The first step is to create a new class, which we'll call # ``Sagittarius`` and make it a subclass of # `~astropy.coordinates.BaseCoordinateFrame`: class Sagittarius(coord.BaseCoordinateFrame): """ A Heliocentric spherical coordinate system defined by the orbit of the Sagittarius dwarf galaxy, as described in https://ui.adsabs.harvard.edu/abs/2003ApJ...599.1082M and further explained in https://www.stsci.edu/~dlaw/Sgr/. Parameters ---------- representation : `~astropy.coordinates.BaseRepresentation` or None A representation object or None to have no data (or use the other keywords) Lambda : `~astropy.coordinates.Angle`, optional, must be keyword The longitude-like angle corresponding to Sagittarius' orbit. Beta : `~astropy.coordinates.Angle`, optional, must be keyword The latitude-like angle corresponding to Sagittarius' orbit. distance : `~astropy.units.Quantity`, optional, must be keyword The Distance for this object along the line-of-sight. 
    pm_Lambda_cosBeta : `~astropy.units.Quantity`, optional, must be keyword
        The proper motion along the stream in ``Lambda`` (including the
        ``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
    pm_Beta : `~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta``
        must also be given).
    radial_velocity : `~astropy.units.Quantity`, optional, keyword-only
        The radial velocity of this object.

    """

    default_representation = coord.SphericalRepresentation
    default_differential = coord.SphericalCosLatDifferential

    frame_specific_representation_info = {
        coord.SphericalRepresentation: [
            coord.RepresentationMapping('lon', 'Lambda'),
            coord.RepresentationMapping('lat', 'Beta'),
            coord.RepresentationMapping('distance', 'distance')]
    }

##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:

SGR_PHI = (180 + 3.75) * u.degree  # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree

# Generate the rotation matrix using the x-convention (see Goldstein)
SGR_MATRIX = (
    np.diag([1., 1., -1.])
    @ rotation_matrix(SGR_PSI, "z")
    @ rotation_matrix(SGR_THETA, "x")
    @ rotation_matrix(SGR_PHI, "z")
)

##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:

@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
    """Compute the Galactic spherical to heliocentric Sgr transformation matrix."""
    return SGR_MATRIX

##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
# # We then register the inverse transformation by using the transpose of the # rotation matrix (which is faster to compute than the inverse): @frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic) def sgr_to_galactic(): """Compute the heliocentric Sgr to spherical Galactic transformation matrix.""" return matrix_transpose(SGR_MATRIX) ############################################################################## # Now that we've registered these transformations between ``Sagittarius`` and # `~astropy.coordinates.Galactic`, we can transform between *any* coordinate # system and ``Sagittarius`` (as long as the other system has a path to # transform to `~astropy.coordinates.Galactic`). For example, to transform from # ICRS coordinates to ``Sagittarius``, we would do: icrs = coord.SkyCoord(280.161732*u.degree, 11.91934*u.degree, frame='icrs') sgr = icrs.transform_to(Sagittarius) print(sgr) ############################################################################## # Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this # case, a line along the ``Sagittarius`` x-y plane): sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian, Beta=np.zeros(128)*u.radian, frame='sagittarius') icrs = sgr.transform_to(coord.ICRS) print(icrs) ############################################################################## # As an example, we'll now plot the points in both coordinate systems: fig, axes = plt.subplots(2, 1, figsize=(8, 10), subplot_kw={'projection': 'aitoff'}) axes[0].set_title("Sagittarius") axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian, linestyle='none', marker='.') axes[1].set_title("ICRS") axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian, linestyle='none', marker='.') plt.show() ############################################################################## # This particular transformation is just a spherical rotation, which is a # special case of an Affine transformation with no vector offset. The # transformation of velocity components is therefore natively supported as # well: sgr = coord.SkyCoord(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian, Beta=np.zeros(128)*u.radian, pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr, pm_Beta=np.zeros(128)*u.mas/u.yr, frame='sagittarius') icrs = sgr.transform_to(coord.ICRS) print(icrs) fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True) axes[0].set_title("Sagittarius") axes[0].plot(sgr.Lambda.degree, sgr.pm_Lambda_cosBeta.value, linestyle='none', marker='.') axes[0].set_xlabel(r"$\Lambda$ [deg]") axes[0].set_ylabel( fr"$\mu_\Lambda \, \cos B$ [{sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')}]") axes[1].set_title("ICRS") axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value, linestyle='none', marker='.') axes[1].set_ylabel( fr"$\mu_\alpha \, \cos\delta$ [{icrs.pm_ra_cosdec.unit.to_string('latex_inline')}]") axes[2].set_title("ICRS") axes[2].plot(icrs.ra.degree, icrs.pm_dec.value, linestyle='none', marker='.') axes[2].set_xlabel("RA [deg]") axes[2].set_ylabel( fr"$\mu_\delta$ [{icrs.pm_dec.unit.to_string('latex_inline')}]") plt.show()
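##############################################################################
# As an optional sanity check (an addition to this example), the
# transformations registered above can be verified with a round trip:
# transforming the ICRS coordinates from the last step back into the
# ``Sagittarius`` frame should recover the Beta = 0 great circle we started
# from, up to numerical precision:

sgr_roundtrip = icrs.transform_to(Sagittarius)
print(np.allclose(sgr_roundtrip.Beta.to_value(u.radian), 0., atol=1e-10))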
""" ========================================== Create a very large FITS file from scratch ========================================== This example demonstrates how to create a large file (larger than will fit in memory) from scratch using `astropy.io.fits`. *By: Erik Bray* *License: BSD* """ ############################################################################## # Normally to create a single image FITS file one would do something like: import os import numpy as np from astropy.io import fits data = np.zeros((40000, 40000), dtype=np.float64) hdu = fits.PrimaryHDU(data=data) ############################################################################## # Then use the `astropy.io.fits.writeto()` method to write out the new # file to disk hdu.writeto('large.fits') ############################################################################## # However, a 40000 x 40000 array of doubles is nearly twelve gigabytes! Most # systems won't be able to create that in memory just to write out to disk. In # order to create such a large file efficiently requires a little extra work, # and a few assumptions. # # First, it is helpful to anticipate about how large (as in, how many keywords) # the header will have in it. FITS headers must be written in 2880 byte # blocks, large enough for 36 keywords per block (including the END keyword in # the final block). Typical headers have somewhere between 1 and 4 blocks, # though sometimes more. # # Since the first thing we write to a FITS file is the header, we want to write # enough header blocks so that there is plenty of padding in which to add new # keywords without having to resize the whole file. Say you want the header to # use 4 blocks by default. Then, excluding the END card which Astropy will add # automatically, create the header and pad it out to 36 * 4 cards. # # Create a stub array to initialize the HDU; its # exact size is irrelevant, as long as it has the desired number of # dimensions data = np.zeros((100, 100), dtype=np.float64) hdu = fits.PrimaryHDU(data=data) header = hdu.header while len(header) < (36 * 4 - 1): header.append() # Adds a blank card to the end ############################################################################## # Now adjust the NAXISn keywords to the desired size of the array, and write # only the header out to a file. Using the ``hdu.writeto()`` method will cause # astropy to "helpfully" reset the NAXISn keywords to match the size of the # dummy array. That is because it works hard to ensure that only valid FITS # files are written. Instead, we can write just the header to a file using the # `astropy.io.fits.Header.tofile` method: header['NAXIS1'] = 40000 header['NAXIS2'] = 40000 header.tofile('large.fits') ############################################################################## # Finally, grow out the end of the file to match the length of the # data (plus the length of the header). This can be done very efficiently on # most systems by seeking past the end of the file and writing a single byte, # like so: with open('large.fits', 'rb+') as fobj: # Seek past the length of the header, plus the length of the # Data we want to write. # 8 is the number of bytes per value, i.e. 
abs(header['BITPIX'])/8 # (this example is assuming a 64-bit float) # The -1 is to account for the final byte that we are about to # write: fobj.seek(len(header.tostring()) + (40000 * 40000 * 8) - 1) fobj.write(b'\0') ############################################################################## # More generally, this can be written: shape = tuple(header[f'NAXIS{ii}'] for ii in range(1, header['NAXIS']+1)) with open('large.fits', 'rb+') as fobj: fobj.seek(len(header.tostring()) + (np.prod(shape) * np.abs(header['BITPIX']//8)) - 1) fobj.write(b'\0') ############################################################################## # On modern operating systems this will cause the file (past the header) to be # filled with zeros out to the ~12GB needed to hold a 40000 x 40000 image. On # filesystems that support sparse file creation (most Linux filesystems, but not # the HFS+ filesystem used by most Macs) this is a very fast, efficient # operation. On other systems your mileage may vary. # # This isn't the only way to build up a large file, but probably one of the # safest. This method can also be used to create large multi-extension FITS # files, with a little care. ############################################################################## # Finally, we'll remove the file we created: os.remove('large.fits')
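##############################################################################
# A possible follow-up (a sketch added here, not part of the original
# example): had ``large.fits`` been kept around, the pre-allocated data
# section could then be filled in chunk by chunk by reopening the file
# memory-mapped in update mode, which avoids ever holding the full array in
# memory::
#
#     with fits.open('large.fits', mode='update', memmap=True) as hdul:
#         hdul[0].data[:100, :] = 1.0  # write only the first 100 rows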
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Bayesian Blocks for Time Series Analysis. Bayesian Blocks for Time Series Analysis ======================================== Dynamic programming algorithm for solving a piecewise-constant model for various datasets. This is based on the algorithm presented in Scargle et al 2013 [1]_. This code was ported from the astroML project [2]_. Applications include: - finding an optimal histogram with adaptive bin widths - finding optimal segmentation of time series data - detecting inflection points in the rate of event data The primary interface to these routines is the :func:`bayesian_blocks` function. This module provides fitness functions suitable for three types of data: - Irregularly-spaced event data via the :class:`Events` class - Regularly-spaced event data via the :class:`RegularEvents` class - Irregularly-spaced point measurements via the :class:`PointMeasures` class For more fine-tuned control over the fitness functions used, it is possible to define custom :class:`FitnessFunc` classes directly and use them with the :func:`bayesian_blocks` routine. One common application of the Bayesian Blocks algorithm is the determination of optimal adaptive-width histogram bins. This uses the same fitness function as for irregularly-spaced time series events. The easiest interface for creating Bayesian Blocks histograms is the :func:`astropy.stats.histogram` function. References ---------- .. [1] https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S .. [2] https://www.astroml.org/ https://github.com//astroML/astroML/ .. [3] Bellman, R.E., Dreyfus, S.E., 1962. Applied Dynamic Programming. Princeton University Press, Princeton. https://press.princeton.edu/books/hardcover/9780691651873/applied-dynamic-programming .. [4] Bellman, R., Roth, R., 1969. Curve fitting by segmented straight lines. J. Amer. Statist. Assoc. 64, 1079–1084. https://www.tandfonline.com/doi/abs/10.1080/01621459.1969.10501038 """ import warnings from inspect import signature import numpy as np from astropy.utils.exceptions import AstropyUserWarning # TODO: implement other fitness functions from appendix C of Scargle 2013 __all__ = ["FitnessFunc", "Events", "RegularEvents", "PointMeasures", "bayesian_blocks"] def bayesian_blocks(t, x=None, sigma=None, fitness="events", **kwargs): r"""Compute optimal segmentation of data with Scargle's Bayesian Blocks. This is a flexible implementation of the Bayesian Blocks algorithm described in Scargle 2013 [1]_. Parameters ---------- t : array-like data times (one dimensional, length N) x : array-like, optional data values sigma : array-like or float, optional data errors fitness : str or object the fitness function to use for the model. If a string, the following options are supported: - 'events' : binned or unbinned event data. Arguments are ``gamma``, which gives the slope of the prior on the number of bins, or ``ncp_prior``, which is :math:`-\ln({\tt gamma})`. - 'regular_events' : non-overlapping events measured at multiples of a fundamental tick rate, ``dt``, which must be specified as an additional argument. Extra arguments are ``p0``, which gives the false alarm probability to compute the prior, or ``gamma``, which gives the slope of the prior on the number of bins, or ``ncp_prior``, which is :math:`-\ln({\tt gamma})`. - 'measures' : fitness for a measured sequence with Gaussian errors. 
Extra arguments are ``p0``, which gives the false alarm probability to compute the prior, or ``gamma``, which gives the slope of the prior on the number of bins, or ``ncp_prior``, which is :math:`-\ln({\tt gamma})`. In all three cases, if more than one of ``p0``, ``gamma``, and ``ncp_prior`` is chosen, ``ncp_prior`` takes precedence over ``gamma`` which takes precedence over ``p0``. Alternatively, the fitness parameter can be an instance of :class:`FitnessFunc` or a subclass thereof. **kwargs : any additional keyword arguments will be passed to the specified :class:`FitnessFunc` derived class. Returns ------- edges : ndarray array containing the (N+1) edges defining the N bins Examples -------- .. testsetup:: >>> np.random.seed(12345) Event data: >>> t = np.random.normal(size=100) >>> edges = bayesian_blocks(t, fitness='events', p0=0.01) Event data with repeats: >>> t = np.random.normal(size=100) >>> t[80:] = t[:20] >>> edges = bayesian_blocks(t, fitness='events', p0=0.01) Regular event data: >>> dt = 0.05 >>> t = dt * np.arange(1000) >>> x = np.zeros(len(t)) >>> x[np.random.randint(0, len(t), len(t) // 10)] = 1 >>> edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt) Measured point data with errors: >>> t = 100 * np.random.random(100) >>> x = np.exp(-0.5 * (t - 50) ** 2) >>> sigma = 0.1 >>> x_obs = np.random.normal(x, sigma) >>> edges = bayesian_blocks(t, x_obs, sigma, fitness='measures') References ---------- .. [1] Scargle, J et al. (2013) https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S .. [2] Bellman, R.E., Dreyfus, S.E., 1962. Applied Dynamic Programming. Princeton University Press, Princeton. https://press.princeton.edu/books/hardcover/9780691651873/applied-dynamic-programming .. [3] Bellman, R., Roth, R., 1969. Curve fitting by segmented straight lines. J. Amer. Statist. Assoc. 64, 1079–1084. https://www.tandfonline.com/doi/abs/10.1080/01621459.1969.10501038 See Also -------- astropy.stats.histogram : compute a histogram using bayesian blocks """ FITNESS_DICT = { "events": Events, "regular_events": RegularEvents, "measures": PointMeasures, } fitness = FITNESS_DICT.get(fitness, fitness) if type(fitness) is type and issubclass(fitness, FitnessFunc): fitfunc = fitness(**kwargs) elif isinstance(fitness, FitnessFunc): fitfunc = fitness else: raise ValueError("fitness parameter not understood") return fitfunc.fit(t, x, sigma) class FitnessFunc: """Base class for bayesian blocks fitness functions. Derived classes should overload the following method: ``fitness(self, **kwargs)``: Compute the fitness given a set of named arguments. Arguments accepted by fitness must be among ``[T_k, N_k, a_k, b_k, c_k]`` (See [1]_ for details on the meaning of these parameters). Additionally, other methods may be overloaded as well: ``__init__(self, **kwargs)``: Initialize the fitness function with any parameters beyond the normal ``p0`` and ``gamma``. ``validate_input(self, t, x, sigma)``: Enable specific checks of the input data (``t``, ``x``, ``sigma``) to be performed prior to the fit. ``compute_ncp_prior(self, N)``: If ``ncp_prior`` is not defined explicitly, this function is called in order to define it before fitting. This may be calculated from ``gamma``, ``p0``, or whatever method you choose. ``p0_prior(self, N)``: Specify the form of the prior given the false-alarm probability ``p0`` (See [1]_ for details). For examples of implemented fitness functions, see :class:`Events`, :class:`RegularEvents`, and :class:`PointMeasures`. References ---------- .. [1] Scargle, J et al. 
(2013) https://ui.adsabs.harvard.edu/abs/2013ApJ...764..167S """ def __init__(self, p0=0.05, gamma=None, ncp_prior=None): self.p0 = p0 self.gamma = gamma self.ncp_prior = ncp_prior def validate_input(self, t, x=None, sigma=None): """Validate inputs to the model. Parameters ---------- t : array-like times of observations x : array-like, optional values observed at each time sigma : float or array-like, optional errors in values x Returns ------- t, x, sigma : array-like, float or None validated and perhaps modified versions of inputs """ # validate array input t = np.asarray(t, dtype=float) # find unique values of t t = np.array(t) if t.ndim != 1: raise ValueError("t must be a one-dimensional array") unq_t, unq_ind, unq_inv = np.unique(t, return_index=True, return_inverse=True) # if x is not specified, x will be counts at each time if x is None: if sigma is not None: raise ValueError("If sigma is specified, x must be specified") else: sigma = 1 if len(unq_t) == len(t): x = np.ones_like(t) else: x = np.bincount(unq_inv) t = unq_t # if x is specified, then we need to simultaneously sort t and x else: # TODO: allow broadcasted x? x = np.asarray(x, dtype=float) if x.shape not in [(), (1,), (t.size,)]: raise ValueError("x does not match shape of t") x += np.zeros_like(t) if len(unq_t) != len(t): raise ValueError( "Repeated values in t not supported when x is specified" ) t = unq_t x = x[unq_ind] # verify the given sigma value if sigma is None: sigma = 1 else: sigma = np.asarray(sigma, dtype=float) if sigma.shape not in [(), (1,), (t.size,)]: raise ValueError("sigma does not match the shape of x") return t, x, sigma def fitness(self, **kwargs): raise NotImplementedError() def p0_prior(self, N): """Empirical prior, parametrized by the false alarm probability ``p0``. See eq. 21 in Scargle (2013). Note that there was an error in this equation in the original Scargle paper (the "log" was missing). The following corrected form is taken from https://arxiv.org/abs/1304.2818 """ return 4 - np.log(73.53 * self.p0 * (N**-0.478)) # the fitness_args property will return the list of arguments accepted by # the method fitness(). This allows more efficient computation below. @property def _fitness_args(self): return signature(self.fitness).parameters.keys() def compute_ncp_prior(self, N): """ If ``ncp_prior`` is not explicitly defined, compute it from ``gamma`` or ``p0``. """ if self.gamma is not None: return -np.log(self.gamma) elif self.p0 is not None: return self.p0_prior(N) else: raise ValueError( "``ncp_prior`` cannot be computed as neither " "``gamma`` nor ``p0`` is defined." ) def fit(self, t, x=None, sigma=None): """Fit the Bayesian Blocks model given the specified fitness function. 
Parameters ---------- t : array-like data times (one dimensional, length N) x : array-like, optional data values sigma : array-like or float, optional data errors Returns ------- edges : ndarray array containing the (M+1) edges defining the M optimal bins """ t, x, sigma = self.validate_input(t, x, sigma) # compute values needed for computation, below if "a_k" in self._fitness_args: ak_raw = np.ones_like(x) / sigma**2 if "b_k" in self._fitness_args: bk_raw = x / sigma**2 if "c_k" in self._fitness_args: ck_raw = x * x / sigma**2 # create length-(N + 1) array of cell edges edges = np.concatenate([t[:1], 0.5 * (t[1:] + t[:-1]), t[-1:]]) block_length = t[-1] - edges # arrays to store the best configuration N = len(t) best = np.zeros(N, dtype=float) last = np.zeros(N, dtype=int) # Compute ncp_prior if not defined if self.ncp_prior is None: ncp_prior = self.compute_ncp_prior(N) else: ncp_prior = self.ncp_prior # ---------------------------------------------------------------- # Start with first data cell; add one cell at each iteration # ---------------------------------------------------------------- for R in range(N): # Compute fit_vec : fitness of putative last block (end at R) kwds = {} # T_k: width/duration of each block if "T_k" in self._fitness_args: kwds["T_k"] = block_length[: (R + 1)] - block_length[R + 1] # N_k: number of elements in each block if "N_k" in self._fitness_args: kwds["N_k"] = np.cumsum(x[: (R + 1)][::-1])[::-1] # a_k: eq. 31 if "a_k" in self._fitness_args: kwds["a_k"] = 0.5 * np.cumsum(ak_raw[: (R + 1)][::-1])[::-1] # b_k: eq. 32 if "b_k" in self._fitness_args: kwds["b_k"] = -np.cumsum(bk_raw[: (R + 1)][::-1])[::-1] # c_k: eq. 33 if "c_k" in self._fitness_args: kwds["c_k"] = 0.5 * np.cumsum(ck_raw[: (R + 1)][::-1])[::-1] # evaluate fitness function fit_vec = self.fitness(**kwds) A_R = fit_vec - ncp_prior A_R[1:] += best[:R] i_max = np.argmax(A_R) last[R] = i_max best[R] = A_R[i_max] # ---------------------------------------------------------------- # Now find changepoints by iteratively peeling off the last block # ---------------------------------------------------------------- change_points = np.zeros(N, dtype=int) i_cp = N ind = N while i_cp > 0: i_cp -= 1 change_points[i_cp] = ind if ind == 0: break ind = last[ind - 1] if i_cp == 0: change_points[i_cp] = 0 change_points = change_points[i_cp:] return edges[change_points] class Events(FitnessFunc): r"""Bayesian blocks fitness for binned or unbinned events. Parameters ---------- p0 : float, optional False alarm probability, used to compute the prior on :math:`N_{\rm blocks}` (see eq. 21 of Scargle 2013). For the Events type data, ``p0`` does not seem to be an accurate representation of the actual false alarm probability. If you are using this fitness function for a triggering type condition, it is recommended that you run statistical trials on signal-free noise to determine an appropriate value of ``gamma`` or ``ncp_prior`` to use for a desired false alarm rate. gamma : float, optional If specified, then use this gamma to compute the general prior form, :math:`p \sim {\tt gamma}^{N_{\rm blocks}}`. If gamma is specified, p0 is ignored. ncp_prior : float, optional If specified, use the value of ``ncp_prior`` to compute the prior as above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` is ignored. """ def fitness(self, N_k, T_k): # eq. 
19 from Scargle 2013 return N_k * (np.log(N_k / T_k)) def validate_input(self, t, x, sigma): t, x, sigma = super().validate_input(t, x, sigma) if x is not None and np.any(x % 1 > 0): raise ValueError("x must be integer counts for fitness='events'") return t, x, sigma class RegularEvents(FitnessFunc): r"""Bayesian blocks fitness for regular events. This is for data which has a fundamental "tick" length, so that all measured values are multiples of this tick length. In each tick, there are either zero or one counts. Parameters ---------- dt : float tick rate for data p0 : float, optional False alarm probability, used to compute the prior on :math:`N_{\rm blocks}` (see eq. 21 of Scargle 2013). If gamma is specified, p0 is ignored. ncp_prior : float, optional If specified, use the value of ``ncp_prior`` to compute the prior as above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are ignored. """ def __init__(self, dt, p0=0.05, gamma=None, ncp_prior=None): self.dt = dt super().__init__(p0, gamma, ncp_prior) def validate_input(self, t, x, sigma): t, x, sigma = super().validate_input(t, x, sigma) if not np.all((x == 0) | (x == 1)): raise ValueError("Regular events must have only 0 and 1 in x") return t, x, sigma def fitness(self, T_k, N_k): # Eq. C23 of Scargle 2013 M_k = T_k / self.dt N_over_M = N_k / M_k eps = 1e-8 if np.any(N_over_M > 1 + eps): warnings.warn( "regular events: N/M > 1. Is the time step correct?", AstropyUserWarning, ) one_m_NM = 1 - N_over_M N_over_M[N_over_M <= 0] = 1 one_m_NM[one_m_NM <= 0] = 1 return N_k * np.log(N_over_M) + (M_k - N_k) * np.log(one_m_NM) class PointMeasures(FitnessFunc): r"""Bayesian blocks fitness for point measures. Parameters ---------- p0 : float, optional False alarm probability, used to compute the prior on :math:`N_{\rm blocks}` (see eq. 21 of Scargle 2013). If gamma is specified, p0 is ignored. ncp_prior : float, optional If specified, use the value of ``ncp_prior`` to compute the prior as above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are ignored. """ def __init__(self, p0=0.05, gamma=None, ncp_prior=None): super().__init__(p0, gamma, ncp_prior) def fitness(self, a_k, b_k): # eq. 41 from Scargle 2013 return (b_k * b_k) / (4 * a_k) def validate_input(self, t, x, sigma): if x is None: raise ValueError("x must be specified for point measures") return super().validate_input(t, x, sigma)
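
# Minimal usage sketch (an illustrative addition, not part of the astropy
# module): segment a stream of event times clustered around two epochs using
# the Events fitness function via the top-level bayesian_blocks() helper
# defined above. The names below (rng, t_demo) are arbitrary.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    # 200 events near t=0 and another 200 near t=2
    t_demo = np.concatenate([rng.normal(0.0, 0.1, 200), rng.normal(2.0, 0.1, 200)])
    # Returns the optimal block edges; expect a change point near the gap at t~1
    print(bayesian_blocks(t_demo, fitness="events", p0=0.01))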
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains simple functions for dealing with circular statistics, for instance, mean, variance, standard deviation, correlation coefficient, and so on. This module also cover tests of uniformity, e.g., the Rayleigh and V tests. The Maximum Likelihood Estimator for the Von Mises distribution along with the Cramer-Rao Lower Bounds are also implemented. Almost all of the implementations are based on reference [1]_, which is also the basis for the R package 'CircStats' [2]_. """ import numpy as np from astropy.units import Quantity __all__ = [ "circmean", "circstd", "circvar", "circmoment", "circcorrcoef", "rayleightest", "vtest", "vonmisesmle", ] __doctest_requires__ = {"vtest": ["scipy"]} def _components(data, p=1, phi=0.0, axis=None, weights=None): # Utility function for computing the generalized rectangular components # of the circular data. if weights is None: weights = np.ones((1,)) try: weights = np.broadcast_to(weights, data.shape) except ValueError: raise ValueError("Weights and data have inconsistent shape.") C = np.sum(weights * np.cos(p * (data - phi)), axis) / np.sum(weights, axis) S = np.sum(weights * np.sin(p * (data - phi)), axis) / np.sum(weights, axis) return C, S def _angle(data, p=1, phi=0.0, axis=None, weights=None): # Utility function for computing the generalized sample mean angle C, S = _components(data, p, phi, axis, weights) # theta will be an angle in the interval [-np.pi, np.pi) # [-180, 180)*u.deg in case data is a Quantity theta = np.arctan2(S, C) if isinstance(data, Quantity): theta = theta.to(data.unit) return theta def _length(data, p=1, phi=0.0, axis=None, weights=None): # Utility function for computing the generalized sample length C, S = _components(data, p, phi, axis, weights) return np.hypot(S, C) def circmean(data, axis=None, weights=None): """Computes the circular mean angle of an array of circular data. Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. axis : int, optional Axis along which circular means are computed. The default is to compute the mean of the flattened array. weights : numpy.ndarray, optional In case of grouped data, the i-th element of ``weights`` represents a weighting factor for each group such that ``sum(weights, axis)`` equals the number of observations. See [1]_, remark 1.4, page 22, for detailed explanation. Returns ------- circmean : ndarray or `~astropy.units.Quantity` Circular mean. Examples -------- >>> import numpy as np >>> from astropy.stats import circmean >>> from astropy import units as u >>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg >>> circmean(data) # doctest: +FLOAT_CMP <Quantity 48.62718088722989 deg> References ---------- .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> """ return _angle(data, 1, 0.0, axis, weights) def circvar(data, axis=None, weights=None): """Computes the circular variance of an array of circular data. There are some concepts for defining measures of dispersion for circular data. The variance implemented here is based on the definition given by [1]_, which is also the same used by the R package 'CircStats' [2]_. 
    Parameters
    ----------
    data : ndarray or `~astropy.units.Quantity`
        Array of circular (directional) data, which is assumed to be in
        radians whenever ``data`` is ``numpy.ndarray``. Dimensionless, if
        Quantity.
    axis : int, optional
        Axis along which circular variances are computed. The default is to
        compute the variance of the flattened array.
    weights : numpy.ndarray, optional
        In case of grouped data, the i-th element of ``weights`` represents a
        weighting factor for each group such that ``sum(weights, axis)``
        equals the number of observations. See [1]_, remark 1.4, page 22, for
        detailed explanation.

    Returns
    -------
    circvar : ndarray or `~astropy.units.Quantity` ['dimensionless']
        Circular variance.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.stats import circvar
    >>> from astropy import units as u
    >>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
    >>> circvar(data) # doctest: +FLOAT_CMP
    <Quantity 0.16356352748437508>

    References
    ----------
    .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
       Series on Multivariate Analysis, Vol. 5, 2001.
    .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
       Circular Statistics (2001)'". 2015.
       <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>

    Notes
    -----
    For Scipy < 1.9.0, ``scipy.stats.circvar`` uses a different definition
    based on an approximation using the limit of small angles that approaches
    the linear variance. For Scipy >= 1.9.0, ``scipy.stats.circvar`` uses a
    definition consistent with this implementation.
    """
    return 1.0 - _length(data, 1, 0.0, axis, weights)


def circstd(data, axis=None, weights=None, method="angular"):
    """Computes the circular standard deviation of an array of circular data.

    The standard deviation implemented here is based on the definitions given
    by [1]_, which are also the ones used by the R package 'CircStats' [2]_.

    Two methods are implemented: 'angular' and 'circular'. The former is
    defined as sqrt(2 * (1 - R)) and it is bounded in [0, sqrt(2)]. The
    latter is defined as sqrt(-2 * ln(R)) and it is bounded in [0, inf].

    Following 'CircStat', the default method used to obtain the standard
    deviation is 'angular'.

    Parameters
    ----------
    data : ndarray or `~astropy.units.Quantity`
        Array of circular (directional) data, which is assumed to be in
        radians whenever ``data`` is ``numpy.ndarray``. If quantity, must be
        dimensionless.
    axis : int, optional
        Axis along which circular standard deviations are computed. The
        default is to compute the standard deviation of the flattened array.
    weights : numpy.ndarray, optional
        In case of grouped data, the i-th element of ``weights`` represents a
        weighting factor for each group such that ``sum(weights, axis)``
        equals the number of observations. See [3]_, remark 1.4, page 22,
        for detailed explanation.
    method : str, optional
        The method used to estimate the standard deviation:

        - 'angular' : obtains the angular deviation
        - 'circular' : obtains the circular deviation

    Returns
    -------
    circstd : ndarray or `~astropy.units.Quantity` ['dimensionless']
        Angular or circular standard deviation.
Examples -------- >>> import numpy as np >>> from astropy.stats import circstd >>> from astropy import units as u >>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg >>> circstd(data) # doctest: +FLOAT_CMP <Quantity 0.57195022> Alternatively, using the 'circular' method: >>> import numpy as np >>> from astropy.stats import circstd >>> from astropy import units as u >>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg >>> circstd(data, method='circular') # doctest: +FLOAT_CMP <Quantity 0.59766999> References ---------- .. [1] P. Berens. "CircStat: A MATLAB Toolbox for Circular Statistics". Journal of Statistical Software, vol 31, issue 10, 2009. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> .. [3] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. """ if method not in ("angular", "circular"): raise ValueError("method should be either 'angular' or 'circular'") if method == "angular": return np.sqrt(2.0 * (1.0 - _length(data, 1, 0.0, axis, weights))) else: return np.sqrt(-2.0 * np.log(_length(data, 1, 0.0, axis, weights))) def circmoment(data, p=1.0, centered=False, axis=None, weights=None): """Computes the ``p``-th trigonometric circular moment for an array of circular data. Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. p : float, optional Order of the circular moment. centered : bool, optional If ``True``, central circular moments are computed. Default value is ``False``. axis : int, optional Axis along which circular moments are computed. The default is to compute the circular moment of the flattened array. weights : numpy.ndarray, optional In case of grouped data, the i-th element of ``weights`` represents a weighting factor for each group such that ``sum(weights, axis)`` equals the number of observations. See [1]_, remark 1.4, page 22, for detailed explanation. Returns ------- circmoment : ndarray or `~astropy.units.Quantity` The first and second elements correspond to the direction and length of the ``p``-th circular moment, respectively. Examples -------- >>> import numpy as np >>> from astropy.stats import circmoment >>> from astropy import units as u >>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg >>> circmoment(data, p=2) # doctest: +FLOAT_CMP (<Quantity 90.99263082432564 deg>, <Quantity 0.48004283892950717>) References ---------- .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> """ if centered: phi = circmean(data, axis, weights) else: phi = 0.0 return _angle(data, p, phi, axis, weights), _length(data, p, phi, axis, weights) def circcorrcoef(alpha, beta, axis=None, weights_alpha=None, weights_beta=None): """Computes the circular correlation coefficient between two array of circular data. Parameters ---------- alpha : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. beta : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. 
    axis : int, optional
        Axis along which circular correlation coefficients are computed.
        The default is to compute the circular correlation coefficient of the
        flattened array.
    weights_alpha : numpy.ndarray, optional
        In case of grouped data, the i-th element of ``weights_alpha``
        represents a weighting factor for each group such that
        ``sum(weights_alpha, axis)`` equals the number of observations.
        See [1]_, remark 1.4, page 22, for detailed explanation.
    weights_beta : numpy.ndarray, optional
        See description of ``weights_alpha``.

    Returns
    -------
    rho : ndarray or `~astropy.units.Quantity` ['dimensionless']
        Circular correlation coefficient.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.stats import circcorrcoef
    >>> from astropy import units as u
    >>> alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302,
    ...                   324, 85, 324, 340, 157, 238, 254, 146, 232, 122,
    ...                   329])*u.deg
    >>> beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94,
    ...                  45, 47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
    >>> circcorrcoef(alpha, beta) # doctest: +FLOAT_CMP
    <Quantity 0.2704648826748831>

    References
    ----------
    .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
       Series on Multivariate Analysis, Vol. 5, 2001.
    .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
       Circular Statistics (2001)'". 2015.
       <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    """
    if np.size(alpha, axis) != np.size(beta, axis):
        raise ValueError("alpha and beta must be arrays of the same size")

    mu_a = circmean(alpha, axis, weights_alpha)
    mu_b = circmean(beta, axis, weights_beta)

    sin_a = np.sin(alpha - mu_a)
    sin_b = np.sin(beta - mu_b)
    rho = np.sum(sin_a * sin_b) / np.sqrt(np.sum(sin_a * sin_a) * np.sum(sin_b * sin_b))

    return rho


def rayleightest(data, axis=None, weights=None):
    """Performs the Rayleigh test of uniformity.

    This test is used to identify a non-uniform distribution, i.e. it is
    designed for detecting an unimodal deviation from uniformity. More
    precisely, it assumes the following hypotheses:
    - H0 (null hypothesis): The population is distributed uniformly around the
    circle.
    - H1 (alternative hypothesis): The population is not distributed uniformly
    around the circle.
    Small p-values suggest rejecting the null hypothesis.

    Parameters
    ----------
    data : ndarray or `~astropy.units.Quantity`
        Array of circular (directional) data, which is assumed to be in
        radians whenever ``data`` is ``numpy.ndarray``.
    axis : int, optional
        Axis along which the Rayleigh test will be performed.
    weights : numpy.ndarray, optional
        In case of grouped data, the i-th element of ``weights`` represents a
        weighting factor for each group such that ``np.sum(weights, axis)``
        equals the number of observations.
        See [1]_, remark 1.4, page 22, for detailed explanation.

    Returns
    -------
    p-value : float or `~astropy.units.Quantity` ['dimensionless']

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.stats import rayleightest
    >>> from astropy import units as u
    >>> data = np.array([130, 90, 0, 145])*u.deg
    >>> rayleightest(data) # doctest: +FLOAT_CMP
    <Quantity 0.2563487733797317>

    References
    ----------
    .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
       Series on Multivariate Analysis, Vol. 5, 2001.
    .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
       Circular Statistics (2001)'". 2015.
       <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    .. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
       Uniformity." Lecture Notes, STA 6934/5805.
University of Florida, 2007. .. [4] D. Wilkie. "Rayleigh Test for Randomness of Circular Data". Applied Statistics. 1983. <http://wexler.free.fr/library/files/wilkie%20(1983)%20rayleigh%20test%20for%20randomness%20of%20circular%20data.pdf> """ n = np.size(data, axis=axis) Rbar = _length(data, 1, 0.0, axis, weights) z = n * Rbar * Rbar # see [3] and [4] for the formulae below tmp = 1.0 if n < 50: tmp = ( 1.0 + (2.0 * z - z * z) / (4.0 * n) - (24.0 * z - 132.0 * z**2.0 + 76.0 * z**3.0 - 9.0 * z**4.0) / (288.0 * n * n) ) p_value = np.exp(-z) * tmp return p_value def vtest(data, mu=0.0, axis=None, weights=None): """Performs the Rayleigh test of uniformity where the alternative hypothesis H1 is assumed to have a known mean angle ``mu``. Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. mu : float or `~astropy.units.Quantity` ['angle'], optional Mean angle. Assumed to be known. axis : int, optional Axis along which the V test will be performed. weights : numpy.ndarray, optional In case of grouped data, the i-th element of ``weights`` represents a weighting factor for each group such that ``sum(weights, axis)`` equals the number of observations. See [1]_, remark 1.4, page 22, for detailed explanation. Returns ------- p-value : float or `~astropy.units.Quantity` ['dimensionless'] Examples -------- >>> import numpy as np >>> from astropy.stats import vtest >>> from astropy import units as u >>> data = np.array([130, 90, 0, 145])*u.deg >>> vtest(data) # doctest: +FLOAT_CMP <Quantity 0.6223678199713766> References ---------- .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics". Series on Multivariate Analysis, Vol. 5, 2001. .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in Circular Statistics (2001)'". 2015. <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf> .. [3] M. Chirstman., C. Miller. "Testing a Sample of Directions for Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007. """ from scipy.stats import norm if weights is None: weights = np.ones((1,)) try: weights = np.broadcast_to(weights, data.shape) except ValueError: raise ValueError("Weights and data have inconsistent shape.") n = np.size(data, axis=axis) R0bar = np.sum(weights * np.cos(data - mu), axis) / np.sum(weights, axis) z = np.sqrt(2.0 * n) * R0bar pz = norm.cdf(z) fz = norm.pdf(z) # see reference [3] p_value = ( 1 - pz + fz * ( (3 * z - z**3) / (16.0 * n) + (15 * z + 305 * z**3 - 125 * z**5 + 9 * z**7) / (4608.0 * n * n) ) ) return p_value def _A1inv(x): # Approximation for _A1inv(x) according R Package 'CircStats' # See http://www.scienceasia.org/2012.38.n1/scias38_118.pdf, equation (4) if 0 <= x < 0.53: return 2.0 * x + x * x * x + (5.0 * x**5) / 6.0 elif x < 0.85: return -0.4 + 1.39 * x + 0.43 / (1.0 - x) else: return 1.0 / (x * x * x - 4.0 * x * x + 3.0 * x) def vonmisesmle(data, axis=None): """Computes the Maximum Likelihood Estimator (MLE) for the parameters of the von Mises distribution. Parameters ---------- data : ndarray or `~astropy.units.Quantity` Array of circular (directional) data, which is assumed to be in radians whenever ``data`` is ``numpy.ndarray``. axis : int, optional Axis along which the mle will be computed. Returns ------- mu : float or `~astropy.units.Quantity` The mean (aka location parameter). kappa : float or `~astropy.units.Quantity` ['dimensionless'] The concentration parameter. 
    Examples
    --------
    >>> import numpy as np
    >>> from astropy.stats import vonmisesmle
    >>> from astropy import units as u
    >>> data = np.array([130, 90, 0, 145])*u.deg
    >>> vonmisesmle(data) # doctest: +FLOAT_CMP
    (<Quantity 101.16894320013179 deg>, <Quantity 1.49358958737054>)

    References
    ----------
    .. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
       Series on Multivariate Analysis, Vol. 5, 2001.
    .. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
       Circular Statistics (2001)'". 2015.
       <https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    """
    # compute the mean direction along the requested axis
    mu = circmean(data, axis=axis)
    kappa = _A1inv(np.mean(np.cos(data - mu), axis))
    return mu, kappa
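
# Minimal usage sketch (an illustrative addition, not part of the astropy
# module): the ``weights`` argument of circmean lets grouped data be
# summarized without expanding it, e.g. three observations at 30 deg and one
# at 330 deg. The names below (angles, weights) are arbitrary.
if __name__ == "__main__":
    angles = np.radians([30.0, 330.0])
    weights = np.array([3.0, 1.0])  # sum(weights) equals the number of observations
    print(np.degrees(circmean(angles, weights=weights)))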
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains simple statistical algorithms that are straightforwardly implemented as a single python function (or family of functions). This module should generally not be used directly. Everything in `__all__` is imported into `astropy.stats`, and hence that package should be used for access. """ import math import numpy as np from . import _stats __all__ = [ "gaussian_fwhm_to_sigma", "gaussian_sigma_to_fwhm", "binom_conf_interval", "binned_binom_proportion", "poisson_conf_interval", "median_absolute_deviation", "mad_std", "signal_to_noise_oir_ccd", "bootstrap", "kuiper", "kuiper_two", "kuiper_false_positive_probability", "cdf_from_intervals", "interval_overlap_length", "histogram_intervals", "fold_intervals", ] __doctest_skip__ = ["binned_binom_proportion"] __doctest_requires__ = { "binom_conf_interval": ["scipy"], "poisson_conf_interval": ["scipy"], } gaussian_sigma_to_fwhm = 2.0 * math.sqrt(2.0 * math.log(2.0)) """ Factor with which to multiply Gaussian 1-sigma standard deviation to convert it to full width at half maximum (FWHM). """ gaussian_fwhm_to_sigma = 1.0 / gaussian_sigma_to_fwhm """ Factor with which to multiply Gaussian full width at half maximum (FWHM) to convert it to 1-sigma standard deviation. """ def binom_conf_interval(k, n, confidence_level=0.68269, interval="wilson"): r"""Binomial proportion confidence interval given k successes, n trials. Parameters ---------- k : int or numpy.ndarray Number of successes (0 <= ``k`` <= ``n``). n : int or numpy.ndarray Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays, they must have the same shape. confidence_level : float, optional Desired probability content of interval. Default is 0.68269, corresponding to 1 sigma in a 1-dimensional Gaussian distribution. Confidence level must be in range [0, 1]. interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional Formula used for confidence interval. See notes for details. The ``'wilson'`` and ``'jeffreys'`` intervals generally give similar results, while 'flat' is somewhat different, especially for small values of ``n``. ``'wilson'`` should be somewhat faster than ``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not recommended. It is provided for comparison purposes. Default is ``'wilson'``. Returns ------- conf_interval : ndarray ``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower and upper limits, respectively, for each element in ``k``, ``n``. Notes ----- In situations where a probability of success is not known, it can be estimated from a number of trials (n) and number of observed successes (k). For example, this is done in Monte Carlo experiments designed to estimate a detection efficiency. It is simple to take the sample proportion of successes (k/n) as a reasonable best estimate of the true probability :math:`\epsilon`. However, deriving an accurate confidence interval on :math:`\epsilon` is non-trivial. There are several formulas for this interval (see [1]_). Four intervals are implemented here: **1. The Wilson Interval.** This interval, attributed to Wilson [2]_, is given by .. 
math:: CI_{\rm Wilson} = \frac{k + \kappa^2/2}{n + \kappa^2} \pm \frac{\kappa n^{1/2}}{n + \kappa^2} ((\hat{\epsilon}(1 - \hat{\epsilon}) + \kappa^2/(4n))^{1/2} where :math:`\hat{\epsilon} = k / n` and :math:`\kappa` is the number of standard deviations corresponding to the desired confidence interval for a *normal* distribution (for example, 1.0 for a confidence interval of 68.269%). For a confidence interval of 100(1 - :math:`\alpha`)%, .. math:: \kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha). **2. The Jeffreys Interval.** This interval is derived by applying Bayes' theorem to the binomial distribution with the noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys prior is the Beta distribution, Beta(1/2, 1/2), which has the density function .. math:: f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}. The justification for this prior is that it is invariant under reparameterizations of the binomial proportion. The posterior density function is also a Beta distribution: Beta(k + 1/2, n - k + 1/2). The interval is then chosen so that it is *equal-tailed*: Each tail (outside the interval) contains :math:`\alpha`/2 of the posterior probability, and the interval itself contains 1 - :math:`\alpha`. This interval must be calculated numerically. Additionally, when k = 0 the lower limit is set to 0 and when k = n the upper limit is set to 1, so that in these cases, there is only one tail containing :math:`\alpha`/2 and the interval itself contains 1 - :math:`\alpha`/2 rather than the nominal 1 - :math:`\alpha`. **3. A Flat prior.** This is similar to the Jeffreys interval, but uses a flat (uniform) prior on the binomial proportion over the range 0 to 1 rather than the reparametrization-invariant Jeffreys prior. The posterior density function is a Beta distribution: Beta(k + 1, n - k + 1). The same comments about the nature of the interval (equal-tailed, etc.) also apply to this option. **4. The Wald Interval.** This interval is given by .. math:: CI_{\rm Wald} = \hat{\epsilon} \pm \kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{n}} The Wald interval gives acceptable results in some limiting cases. Particularly, when n is very large, and the true proportion :math:`\epsilon` is not "too close" to 0 or 1. However, as the later is not verifiable when trying to estimate :math:`\epsilon`, this is not very helpful. Its use is not recommended, but it is provided here for comparison purposes due to its prevalence in everyday practical statistics. This function requires ``scipy`` for all interval types. References ---------- .. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001). "Interval Estimation for a Binomial Proportion". Statistical Science 16 (2): 101-133. doi:10.1214/ss/1009213286 .. [2] Wilson, E. B. (1927). "Probable inference, the law of succession, and statistical inference". Journal of the American Statistical Association 22: 209-212. .. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186 (1007): 453-461. doi:10.1098/rspa.1946.0056 .. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford University Press, 3rd edition. ISBN 978-0198503682 Examples -------- Integer inputs return an array with shape (2,): >>> binom_conf_interval(4, 5, interval='wilson') # doctest: +FLOAT_CMP array([0.57921724, 0.92078259]) Arrays of arbitrary dimension are supported. 
The Wilson and Jeffreys intervals give similar results, even for small k, n: >>> binom_conf_interval([1, 2], 5, interval='wilson') # doctest: +FLOAT_CMP array([[0.07921741, 0.21597328], [0.42078276, 0.61736012]]) >>> binom_conf_interval([1, 2,], 5, interval='jeffreys') # doctest: +FLOAT_CMP array([[0.0842525 , 0.21789949], [0.42218001, 0.61753691]]) >>> binom_conf_interval([1, 2], 5, interval='flat') # doctest: +FLOAT_CMP array([[0.12139799, 0.24309021], [0.45401727, 0.61535699]]) In contrast, the Wald interval gives poor results for small k, n. For k = 0 or k = n, the interval always has zero length. >>> binom_conf_interval([1, 2], 5, interval='wald') # doctest: +FLOAT_CMP array([[0.02111437, 0.18091075], [0.37888563, 0.61908925]]) For confidence intervals approaching 1, the Wald interval for 0 < k < n can give intervals that extend outside [0, 1]: >>> binom_conf_interval([1, 2], 5, interval='wald', confidence_level=0.99) # doctest: +FLOAT_CMP array([[-0.26077835, -0.16433593], [ 0.66077835, 0.96433593]]) """ if confidence_level < 0.0 or confidence_level > 1.0: raise ValueError("confidence_level must be between 0. and 1.") alpha = 1.0 - confidence_level k = np.asarray(k).astype(int) n = np.asarray(n).astype(int) if (n <= 0).any(): raise ValueError("n must be positive") if (k < 0).any() or (k > n).any(): raise ValueError("k must be in {0, 1, .., n}") if interval == "wilson" or interval == "wald": from scipy.special import erfinv kappa = np.sqrt(2.0) * min(erfinv(confidence_level), 1.0e10) # Avoid overflows. k = k.astype(float) n = n.astype(float) p = k / n if interval == "wilson": midpoint = (k + kappa**2 / 2.0) / (n + kappa**2) halflength = ( (kappa * np.sqrt(n)) / (n + kappa**2) * np.sqrt(p * (1 - p) + kappa**2 / (4 * n)) ) conf_interval = np.array([midpoint - halflength, midpoint + halflength]) # Correct intervals out of range due to floating point errors. conf_interval[conf_interval < 0.0] = 0.0 conf_interval[conf_interval > 1.0] = 1.0 else: midpoint = p halflength = kappa * np.sqrt(p * (1.0 - p) / n) conf_interval = np.array([midpoint - halflength, midpoint + halflength]) elif interval == "jeffreys" or interval == "flat": from scipy.special import betaincinv if interval == "jeffreys": lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha) upperbound = betaincinv(k + 0.5, n - k + 0.5, 1.0 - 0.5 * alpha) else: lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha) upperbound = betaincinv(k + 1, n - k + 1, 1.0 - 0.5 * alpha) # Set lower or upper bound to k/n when k/n = 0 or 1 # We have to treat the special case of k/n being scalars, # which is an ugly kludge if lowerbound.ndim == 0: if k == 0: lowerbound = 0.0 elif k == n: upperbound = 1.0 else: lowerbound[k == 0] = 0 upperbound[k == n] = 1 conf_interval = np.array([lowerbound, upperbound]) else: raise ValueError(f"Unrecognized interval: {interval:s}") return conf_interval def binned_binom_proportion( x, success, bins=10, range=None, confidence_level=0.68269, interval="wilson" ): """Binomial proportion and confidence interval in bins of a continuous variable ``x``. Given a set of datapoint pairs where the ``x`` values are continuously distributed and the ``success`` values are binomial ("success / failure" or "true / false"), place the pairs into bins according to ``x`` value and calculate the binomial proportion (fraction of successes) and confidence interval in each bin. Parameters ---------- x : sequence Values. success : sequence of bool Success (`True`) or failure (`False`) corresponding to each value in ``x``. 
Must be same length as ``x``. bins : int or sequence of scalar, optional If bins is an int, it defines the number of equal-width bins in the given range (10, by default). If bins is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths (in this case, 'range' is ignored). range : (float, float), optional The lower and upper range of the bins. If `None` (default), the range is set to ``(x.min(), x.max())``. Values outside the range are ignored. confidence_level : float, optional Must be in range [0, 1]. Desired probability content in the confidence interval ``(p - perr[0], p + perr[1])`` in each bin. Default is 0.68269. interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional Formula used to calculate confidence interval on the binomial proportion in each bin. See `binom_conf_interval` for definition of the intervals. The 'wilson', 'jeffreys', and 'flat' intervals generally give similar results. 'wilson' should be somewhat faster, while 'jeffreys' and 'flat' are marginally superior, but differ in the assumed prior. The 'wald' interval is generally not recommended. It is provided for comparison purposes. Default is 'wilson'. Returns ------- bin_ctr : ndarray Central value of bins. Bins without any entries are not returned. bin_halfwidth : ndarray Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and ``bin_ctr + bins_halfwidth`` give the left and right side of each bin, respectively. p : ndarray Efficiency in each bin. perr : ndarray 2-d array of shape (2, len(p)) representing the upper and lower uncertainty on p in each bin. Notes ----- This function requires ``scipy`` for all interval types. See Also -------- binom_conf_interval : Function used to estimate confidence interval in each bin. Examples -------- Suppose we wish to estimate the efficiency of a survey in detecting astronomical sources as a function of magnitude (i.e., the probability of detecting a source given its magnitude). In a realistic case, we might prepare a large number of sources with randomly selected magnitudes, inject them into simulated images, and then record which were detected at the end of the reduction pipeline. As a toy example, we generate 100 data points with randomly selected magnitudes between 20 and 30 and "observe" them with a known detection function (here, the error function, with 50% detection probability at magnitude 25): >>> from scipy.special import erf >>> from scipy.stats.distributions import binom >>> def true_efficiency(x): ... return 0.5 - 0.5 * erf((x - 25.) / 2.) >>> mag = 20. + 10. * np.random.rand(100) >>> detected = binom.rvs(1, true_efficiency(mag)) >>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20) >>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', ... label='estimate') .. plot:: import numpy as np from scipy.special import erf from scipy.stats.distributions import binom import matplotlib.pyplot as plt from astropy.stats import binned_binom_proportion def true_efficiency(x): return 0.5 - 0.5 * erf((x - 25.) / 2.) np.random.seed(400) mag = 20. + 10. * np.random.rand(100) np.random.seed(600) detected = binom.rvs(1, true_efficiency(mag)) bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20) plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', label='estimate') X = np.linspace(20., 30., 1000) plt.plot(X, true_efficiency(X), label='true efficiency') plt.ylim(0., 1.) 
plt.title('Detection efficiency vs magnitude') plt.xlabel('Magnitude') plt.ylabel('Detection efficiency') plt.legend() plt.show() The above example uses the Wilson confidence interval to calculate the uncertainty ``perr`` in each bin (see the definition of various confidence intervals in `binom_conf_interval`). A commonly used alternative is the Wald interval. However, the Wald interval can give nonsensical uncertainties when the efficiency is near 0 or 1, and is therefore **not** recommended. As an illustration, the following example shows the same data as above but uses the Wald interval rather than the Wilson interval to calculate ``perr``: >>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20, ... interval='wald') >>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', ... label='estimate') .. plot:: import numpy as np from scipy.special import erf from scipy.stats.distributions import binom import matplotlib.pyplot as plt from astropy.stats import binned_binom_proportion def true_efficiency(x): return 0.5 - 0.5 * erf((x - 25.) / 2.) np.random.seed(400) mag = 20. + 10. * np.random.rand(100) np.random.seed(600) detected = binom.rvs(1, true_efficiency(mag)) bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20, interval='wald') plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o', label='estimate') X = np.linspace(20., 30., 1000) plt.plot(X, true_efficiency(X), label='true efficiency') plt.ylim(0., 1.) plt.title('The Wald interval can give nonsensical uncertainties') plt.xlabel('Magnitude') plt.ylabel('Detection efficiency') plt.legend() plt.show() """ x = np.ravel(x) success = np.ravel(success).astype(bool) if x.shape != success.shape: raise ValueError("sizes of x and success must match") # Put values into a histogram (`n`). Put "successful" values # into a second histogram (`k`) with identical binning. n, bin_edges = np.histogram(x, bins=bins, range=range) k, bin_edges = np.histogram(x[success], bins=bin_edges) bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.0 bin_halfwidth = bin_ctr - bin_edges[:-1] # Remove bins with zero entries. valid = n > 0 bin_ctr = bin_ctr[valid] bin_halfwidth = bin_halfwidth[valid] n = n[valid] k = k[valid] p = k / n bounds = binom_conf_interval( k, n, confidence_level=confidence_level, interval=interval ) perr = np.abs(bounds - p) return bin_ctr, bin_halfwidth, p, perr def _check_poisson_conf_inputs(sigma, background, confidence_level, name): if sigma != 1: raise ValueError(f"Only sigma=1 supported for interval {name}") if background != 0: raise ValueError(f"background not supported for interval {name}") if confidence_level is not None: raise ValueError(f"confidence_level not supported for interval {name}") def poisson_conf_interval( n, interval="root-n", sigma=1, background=0, confidence_level=None ): r"""Poisson parameter confidence interval given observed counts. Parameters ---------- n : int or numpy.ndarray Number of counts (0 <= ``n``). interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional Formula used for confidence interval. See notes for details. Default is ``'root-n'``. sigma : float, optional Number of sigma for confidence interval; only supported for the 'frequentist-confidence' mode. background : float, optional Number of counts expected from the background; only supported for the 'kraft-burrows-nousek' mode. 
This number is assumed to be determined from a large region so that the uncertainty on its value is negligible. confidence_level : float, optional Confidence level between 0 and 1; only supported for the 'kraft-burrows-nousek' mode. Returns ------- conf_interval : ndarray ``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower and upper limits, respectively, for each element in ``n``. Notes ----- The "right" confidence interval to use for Poisson data is a matter of debate. The CDF working group `recommends <https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ using root-n throughout, largely in the interest of comprehensibility, but discusses other possibilities. The ATLAS group also discusses several possibilities but concludes that no single representation is suitable for all cases. The suggestion has also been `floated <https://ui.adsabs.harvard.edu/abs/2012EPJP..127...24A>`_ that error bars should be attached to theoretical predictions instead of observed data, which this function will not help with (but it's easy; then you really should use the square root of the theoretical prediction). The intervals implemented here are: **1. 'root-n'** This is a very widely used standard rule derived from the maximum-likelihood estimator for the mean of the Poisson process. While it produces questionable results for small n and outright wrong results for n=0, it is standard enough that people are (supposedly) used to interpreting these wonky values. The interval is .. math:: CI = (n-\sqrt{n}, n+\sqrt{n}) **2. 'root-n-0'** This is identical to the above except that where n is zero the interval returned is (0,1). **3. 'pearson'** This is an only-slightly-more-complicated rule based on Pearson's chi-squared rule (as `explained <https://web.archive.org/web/20210222093249/https://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt>`_ by the CDF working group). It also has the nice feature that if your theory curve touches an endpoint of the interval, then your data point is indeed one sigma away. The interval is .. math:: CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25}) **4. 'sherpagehrels'** This rule is used by default in the fitting package 'sherpa'. The `documentation <https://cxc.cfa.harvard.edu/sherpa/statistics/#chigehrels>`_ claims it is based on a numerical approximation published in `Gehrels (1986) <https://ui.adsabs.harvard.edu/abs/1986ApJ...303..336G>`_ but it does not actually appear there. It is symmetrical, and while the upper limits are within about 1% of those given by 'frequentist-confidence', the lower limits can be badly wrong. The interval is .. math:: CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75}) **5. 'frequentist-confidence'** These are frequentist central confidence intervals: .. math:: CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n), 0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1))) where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square distribution with the indicated number of degrees of freedom and :math:`\alpha` is the one-tailed probability of the normal distribution (at the point given by the parameter 'sigma'). See `Maxwell (2011) <https://ui.adsabs.harvard.edu/abs/2011arXiv1102.0822M>`_ for further details. **6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows for the presence of a known background :math:`B` in the source signal :math:`N`. For a given confidence level :math:`CL` the confidence interval :math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by: .. 
math:: CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS where the function :math:`f_{N,B}` is: .. math:: f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!} and the normalization constant :math:`C`: .. math:: C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1} = \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1} See `Kraft, Burrows, and Nousek (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ for further details. These formulas implement a positive, uniform prior. `Kraft, Burrows, and Nousek (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_ discuss this choice in more detail and show that the problem is relatively insensitive to the choice of prior. This function has an optional dependency: Either `Scipy <https://www.scipy.org/>`_ or `mpmath <http://mpmath.org/>`_ need to be available (Scipy works only for N < 100). This code is very intense numerically, which makes it much slower than the other methods, in particular for large count numbers (above 1000 even with ``mpmath``). Fortunately, some of the other methods or a Gaussian approximation usually work well in this regime. Examples -------- >>> poisson_conf_interval(np.arange(10), interval='root-n').T array([[ 0. , 0. ], [ 0. , 2. ], [ 0.58578644, 3.41421356], [ 1.26794919, 4.73205081], [ 2. , 6. ], [ 2.76393202, 7.23606798], [ 3.55051026, 8.44948974], [ 4.35424869, 9.64575131], [ 5.17157288, 10.82842712], [ 6. , 12. ]]) >>> poisson_conf_interval(np.arange(10), interval='root-n-0').T array([[ 0. , 1. ], [ 0. , 2. ], [ 0.58578644, 3.41421356], [ 1.26794919, 4.73205081], [ 2. , 6. ], [ 2.76393202, 7.23606798], [ 3.55051026, 8.44948974], [ 4.35424869, 9.64575131], [ 5.17157288, 10.82842712], [ 6. , 12. ]]) >>> poisson_conf_interval(np.arange(10), interval='pearson').T array([[ 0. , 1. ], [ 0.38196601, 2.61803399], [ 1. , 4. ], [ 1.69722436, 5.30277564], [ 2.43844719, 6.56155281], [ 3.20871215, 7.79128785], [ 4. , 9. ], [ 4.8074176 , 10.1925824 ], [ 5.62771868, 11.37228132], [ 6.45861873, 12.54138127]]) >>> poisson_conf_interval( ... np.arange(10), interval='frequentist-confidence').T array([[ 0. , 1.84102165], [ 0.17275378, 3.29952656], [ 0.70818544, 4.63785962], [ 1.36729531, 5.91818583], [ 2.08566081, 7.16275317], [ 2.84030886, 8.38247265], [ 3.62006862, 9.58364155], [ 4.41852954, 10.77028072], [ 5.23161394, 11.94514152], [ 6.05653896, 13.11020414]]) >>> poisson_conf_interval( ... 7, interval='frequentist-confidence').T array([ 4.41852954, 10.77028072]) >>> poisson_conf_interval( ... 10, background=1.5, confidence_level=0.95, ... 
interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP array([[ 3.47894005, 16.113329533]]) """ if not np.isscalar(n): n = np.asanyarray(n) if interval == "root-n": _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)]) elif interval == "root-n-0": _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array([n - np.sqrt(n), n + np.sqrt(n)]) if np.isscalar(n): if n == 0: conf_interval[1] = 1 else: conf_interval[1, n == 0] = 1 elif interval == "pearson": _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array( [n + 0.5 - np.sqrt(n + 0.25), n + 0.5 + np.sqrt(n + 0.25)] ) elif interval == "sherpagehrels": _check_poisson_conf_inputs(sigma, background, confidence_level, interval) conf_interval = np.array([n - 1 - np.sqrt(n + 0.75), n + 1 + np.sqrt(n + 0.75)]) elif interval == "frequentist-confidence": _check_poisson_conf_inputs(1.0, background, confidence_level, interval) import scipy.stats alpha = scipy.stats.norm.sf(sigma) conf_interval = np.array( [ 0.5 * scipy.stats.chi2(2 * n).ppf(alpha), 0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha), ] ) if np.isscalar(n): if n == 0: conf_interval[0] = 0 else: conf_interval[0, n == 0] = 0 elif interval == "kraft-burrows-nousek": # Deprecation warning in Python 3.9 when N is float, so we force int, # see https://github.com/astropy/astropy/issues/10832 if np.isscalar(n): if not isinstance(n, int): raise TypeError("Number of counts must be integer.") elif not issubclass(n.dtype.type, np.integer): raise TypeError("Number of counts must be integer.") if confidence_level is None: raise ValueError( f"Set confidence_level for method {interval}. (sigma is ignored.)" ) confidence_level = np.asanyarray(confidence_level) if np.any(confidence_level <= 0) or np.any(confidence_level >= 1): raise ValueError("confidence_level must be a number between 0 and 1.") background = np.asanyarray(background) if np.any(background < 0): raise ValueError("Background must be >= 0.") conf_interval = np.vectorize(_kraft_burrows_nousek, cache=True)( n, background, confidence_level ) conf_interval = np.vstack(conf_interval) else: raise ValueError(f"Invalid method for Poisson confidence intervals: {interval}") return conf_interval def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False): """ Calculate the median absolute deviation (MAD). The MAD is defined as ``median(abs(a - median(a)))``. Parameters ---------- data : array-like Input array or object that can be converted to an array. axis : None, int, or tuple of int, optional The axis or axes along which the MADs are computed. The default (`None`) is to compute the MAD of the flattened array. func : callable, optional The function used to compute the median. Defaults to `numpy.ma.median` for masked arrays, otherwise to `numpy.median`. ignore_nan : bool Ignore NaN values (treat them as if they are not in the array) when computing the median. This will use `numpy.ma.median` if ``axis`` is specified, or `numpy.nanmedian` if ``axis==None`` and numpy's version is >1.10 because nanmedian is slightly faster in this case. Returns ------- mad : float or `~numpy.ndarray` The median absolute deviation of the input array. If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ndarray` will be returned. 
Examples -------- Generate random variates from a Gaussian distribution and return the median absolute deviation for that distribution:: >>> import numpy as np >>> from astropy.stats import median_absolute_deviation >>> rand = np.random.default_rng(12345) >>> from numpy.random import randn >>> mad = median_absolute_deviation(rand.standard_normal(1000)) >>> print(mad) # doctest: +FLOAT_CMP 0.6829504282771885 See Also -------- mad_std """ if func is None: # Check if the array has a mask and if so use np.ma.median # See https://github.com/numpy/numpy/issues/7330 why using np.ma.median # for normal arrays should not be done (summary: np.ma.median always # returns an masked array even if the result should be scalar). (#4658) if isinstance(data, np.ma.MaskedArray): is_masked = True func = np.ma.median if ignore_nan: data = np.ma.masked_where(np.isnan(data), data, copy=True) elif ignore_nan: is_masked = False func = np.nanmedian else: is_masked = False func = np.median # drops units if result is NaN else: is_masked = None data = np.asanyarray(data) # np.nanmedian has `keepdims`, which is a good option if we're not allowing # user-passed functions here data_median = func(data, axis=axis) # broadcast the median array before subtraction if axis is not None: data_median = np.expand_dims(data_median, axis=axis) result = func(np.abs(data - data_median), axis=axis, overwrite_input=True) if axis is None and np.ma.isMaskedArray(result): # return scalar version result = result.item() elif np.ma.isMaskedArray(result) and not is_masked: # if the input array was not a masked array, we don't want to return a # masked array result = result.filled(fill_value=np.nan) return result def mad_std(data, axis=None, func=None, ignore_nan=False): r""" Calculate a robust standard deviation using the `median absolute deviation (MAD) <https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The standard deviation estimator is given by: .. math:: \sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)} \approx 1.4826 \ \textrm{MAD} where :math:`\Phi^{-1}(P)` is the normal inverse cumulative distribution function evaluated at probability :math:`P = 3/4`. Parameters ---------- data : array-like Data array or object that can be converted to an array. axis : None, int, or tuple of int, optional The axis or axes along which the robust standard deviations are computed. The default (`None`) is to compute the robust standard deviation of the flattened array. func : callable, optional The function used to compute the median. Defaults to `numpy.ma.median` for masked arrays, otherwise to `numpy.median`. ignore_nan : bool Ignore NaN values (treat them as if they are not in the array) when computing the median. This will use `numpy.ma.median` if ``axis`` is specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is >1.10 because nanmedian is slightly faster in this case. Returns ------- mad_std : float or `~numpy.ndarray` The robust standard deviation of the input data. If ``axis`` is `None` then a scalar will be returned, otherwise a `~numpy.ndarray` will be returned. Examples -------- >>> import numpy as np >>> from astropy.stats import mad_std >>> rand = np.random.default_rng(12345) >>> madstd = mad_std(rand.normal(5, 2, (100, 100))) >>> print(madstd) # doctest: +FLOAT_CMP 1.984147963351707 See Also -------- biweight_midvariance, biweight_midcovariance, median_absolute_deviation """ # NOTE: 1. 
/ scipy.stats.norm.ppf(0.75) = 1.482602218505602 MAD = median_absolute_deviation(data, axis=axis, func=func, ignore_nan=ignore_nan) return MAD * 1.482602218505602 def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix, gain=1.0): """Computes the signal to noise ratio for source being observed in the optical/IR using a CCD. Parameters ---------- t : float or numpy.ndarray CCD integration time in seconds source_eps : float Number of electrons (photons) or DN per second in the aperture from the source. Note that this should already have been scaled by the filter transmission and the quantum efficiency of the CCD. If the input is in DN, then be sure to set the gain to the proper value for the CCD. If the input is in electrons per second, then keep the gain as its default of 1.0. sky_eps : float Number of electrons (photons) or DN per second per pixel from the sky background. Should already be scaled by filter transmission and QE. This must be in the same units as source_eps for the calculation to make sense. dark_eps : float Number of thermal electrons per second per pixel. If this is given in DN or ADU, then multiply by the gain to get the value in electrons. rd : float Read noise of the CCD in electrons. If this is given in DN or ADU, then multiply by the gain to get the value in electrons. npix : float Size of the aperture in pixels gain : float, optional Gain of the CCD. In units of electrons per DN. Returns ------- SNR : float or numpy.ndarray Signal to noise ratio calculated from the inputs """ signal = t * source_eps * gain noise = np.sqrt( t * (source_eps * gain + npix * (sky_eps * gain + dark_eps)) + npix * rd**2 ) return signal / noise def bootstrap(data, bootnum=100, samples=None, bootfunc=None): """Performs bootstrap resampling on numpy arrays. Bootstrap resampling is used to understand confidence intervals of sample estimates. This function returns versions of the dataset resampled with replacement ("case bootstrapping"). These can all be run through a function or statistic to produce a distribution of values which can then be used to find the confidence intervals. Parameters ---------- data : ndarray N-D array. The bootstrap resampling will be performed on the first index, so the first index should access the relevant information to be bootstrapped. bootnum : int, optional Number of bootstrap resamples samples : int, optional Number of samples in each resample. The default `None` sets samples to the number of datapoints bootfunc : function, optional Function to reduce the resampled data. Each bootstrap resample will be put through this function and the results returned. If `None`, the bootstrapped data will be returned Returns ------- boot : ndarray If bootfunc is None, then each row is a bootstrap resample of the data. If bootfunc is specified, then the columns will correspond to the outputs of bootfunc. Examples -------- Obtain a twice resampled array: >>> from astropy.stats import bootstrap >>> import numpy as np >>> from astropy.utils import NumpyRNGContext >>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 2) ... >>> bootresult # doctest: +FLOAT_CMP array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.], [3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]]) >>> bootresult.shape (2, 10) Obtain a statistic on the array >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean) ... >>> bootresult # doctest: +FLOAT_CMP array([4. 
, 4.6]) Obtain a statistic with two outputs on the array >>> test_statistic = lambda x: (np.sum(x), np.mean(x)) >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic) >>> bootresult # doctest: +FLOAT_CMP array([[40. , 4. ], [46. , 4.6], [35. , 3.5]]) >>> bootresult.shape (3, 2) Obtain a statistic with two outputs on the array, keeping only the first output >>> bootfunc = lambda x:test_statistic(x)[0] >>> with NumpyRNGContext(1): ... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc) ... >>> bootresult # doctest: +FLOAT_CMP array([40., 46., 35.]) >>> bootresult.shape (3,) """ if samples is None: samples = data.shape[0] # make sure the input is sane if samples < 1 or bootnum < 1: raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.") if bootfunc is None: resultdims = (bootnum,) + (samples,) + data.shape[1:] else: # test number of outputs from bootfunc, avoid single outputs which are # array-like try: resultdims = (bootnum, len(bootfunc(data))) except TypeError: resultdims = (bootnum,) # create empty boot array boot = np.empty(resultdims) for i in range(bootnum): bootarr = np.random.randint(low=0, high=data.shape[0], size=samples) if bootfunc is None: boot[i] = data[bootarr] else: boot[i] = bootfunc(data[bootarr]) return boot def _scipy_kraft_burrows_nousek(N, B, CL): """Upper limit on a poisson count rate. The implementation is based on Kraft, Burrows and Nousek `ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_. The XMM-Newton upper limit server uses the same formalism. Parameters ---------- N : int or np.int32/np.int64 Total observed count number B : float or np.float32/np.float64 Background count rate (assumed to be known with negligible error from a large background area). CL : float or np.float32/np.float64 Confidence level (number between 0 and 1) Returns ------- S : source count limit Notes ----- Requires :mod:`~scipy`. This implementation will cause Overflow Errors for about N > 100 (the exact limit depends on details of how scipy was compiled). See `~astropy.stats.mpmath_poisson_upper_limit` for an implementation that is slower, but can deal with arbitrarily high numbers since it is based on the `mpmath <http://mpmath.org/>`_ library. """ from math import exp from scipy.integrate import quad from scipy.optimize import brentq from scipy.special import factorial def eqn8(N, B): n = np.arange(N + 1, dtype=np.float64) return 1.0 / (exp(-B) * np.sum(np.power(B, n) / factorial(n))) # The parameters of eqn8 do not vary between calls so we can calculate the # result once and reuse it. The same is True for the factorial of N. # eqn7 is called hundred times so "caching" these values yields a # significant speedup (factor 10). eqn8_res = eqn8(N, B) factorial_N = float(math.factorial(N)) def eqn7(S, N, B): SpB = S + B return eqn8_res * (exp(-SpB) * SpB**N / factorial_N) def eqn9_left(S_min, S_max, N, B): return quad(eqn7, S_min, S_max, args=(N, B), limit=500) def find_s_min(S_max, N, B): """ Kraft, Burrows and Nousek suggest to integrate from N-B in both directions at once, so that S_min and S_max move similarly (see the article for details). Here, this is implemented differently: Treat S_max as the optimization parameters in func and then calculate the matching s_min that has has eqn7(S_max) = eqn7(S_min) here. 
""" y_S_max = eqn7(S_max, N, B) if eqn7(0, N, B) >= y_S_max: return 0.0 else: return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B) def func(s): s_min = find_s_min(s, N, B) out = eqn9_left(s_min, s, N, B) return out[0] - CL S_max = brentq(func, N - B, 100) S_min = find_s_min(S_max, N, B) return S_min, S_max def _mpmath_kraft_burrows_nousek(N, B, CL): """Upper limit on a poisson count rate. The implementation is based on Kraft, Burrows and Nousek in `ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_. The XMM-Newton upper limit server used the same formalism. Parameters ---------- N : int or np.int32/np.int64 Total observed count number B : float or np.float32/np.float64 Background count rate (assumed to be known with negligible error from a large background area). CL : float or np.float32/np.float64 Confidence level (number between 0 and 1) Returns ------- S : source count limit Notes ----- Requires the `mpmath <http://mpmath.org/>`_ library. See `~astropy.stats.scipy_poisson_upper_limit` for an implementation that is based on scipy and evaluates faster, but runs only to about N = 100. """ from mpmath import exp, factorial, findroot, fsum, mpf, power, quad # We convert these values to float. Because for some reason, # mpmath.mpf cannot convert from numpy.int64 N = mpf(float(N)) B = mpf(float(B)) CL = mpf(float(CL)) tol = 1e-4 def eqn8(N, B): sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)] return 1.0 / (exp(-B) * fsum(sumterms)) eqn8_res = eqn8(N, B) factorial_N = factorial(N) def eqn7(S, N, B): SpB = S + B return eqn8_res * (exp(-SpB) * SpB**N / factorial_N) def eqn9_left(S_min, S_max, N, B): def eqn7NB(S): return eqn7(S, N, B) return quad(eqn7NB, [S_min, S_max]) def find_s_min(S_max, N, B): """ Kraft, Burrows and Nousek suggest to integrate from N-B in both directions at once, so that S_min and S_max move similarly (see the article for details). Here, this is implemented differently: Treat S_max as the optimization parameters in func and then calculate the matching s_min that has has eqn7(S_max) = eqn7(S_min) here. """ y_S_max = eqn7(S_max, N, B) # If B > N, then N-B, the "most probable" values is < 0 # and thus s_min is certainly 0. # Note: For small N, s_max is also close to 0 and root finding # might find the wrong root, thus it is important to handle this # case here and return the analytical answer (s_min = 0). if (B >= N) or (eqn7(0, N, B) >= y_S_max): return 0.0 else: def eqn7ysmax(x): return eqn7(x, N, B) - y_S_max return findroot(eqn7ysmax, [0.0, N - B], solver="ridder", tol=tol) def func(s): s_min = find_s_min(s, N, B) out = eqn9_left(s_min, s, N, B) return out - CL # Several numerical problems were found prevent the solvers from finding # the roots unless the starting values are very close to the final values. # Thus, this primitive, time-wasting, brute-force stepping here to get # an interval that can be fed into the ridder solver. s_max_guess = max(N - B, 1.0) while func(s_max_guess) < 0: s_max_guess += 1 S_max = findroot(func, [s_max_guess - 1, s_max_guess], solver="ridder", tol=tol) S_min = find_s_min(S_max, N, B) return float(S_min), float(S_max) def _kraft_burrows_nousek(N, B, CL): """Upper limit on a poisson count rate. The implementation is based on Kraft, Burrows and Nousek in `ApJ 374, 344 (1991) <https://ui.adsabs.harvard.edu/abs/1991ApJ...374..344K>`_. The XMM-Newton upper limit server used the same formalism. 
Parameters ---------- N : int or np.int32/np.int64 Total observed count number B : float or np.float32/np.float64 Background count rate (assumed to be known with negligible error from a large background area). CL : float or np.float32/np.float64 Confidence level (number between 0 and 1) Returns ------- S : source count limit Notes ----- This functions has an optional dependency: Either :mod:`scipy` or `mpmath <http://mpmath.org/>`_ need to be available. (Scipy only works for N < 100). """ from astropy.utils.compat.optional_deps import HAS_MPMATH, HAS_SCIPY if HAS_SCIPY and N <= 100: try: return _scipy_kraft_burrows_nousek(N, B, CL) except OverflowError: if not HAS_MPMATH: raise ValueError("Need mpmath package for input numbers this large.") if HAS_MPMATH: return _mpmath_kraft_burrows_nousek(N, B, CL) raise ImportError("Either scipy or mpmath are required.") def kuiper_false_positive_probability(D, N): """Compute the false positive probability for the Kuiper statistic. Uses the set of four formulas described in Paltani 2004; they report the resulting function never underestimates the false positive probability but can be a bit high in the N=40..50 range. (They quote a factor 1.5 at the 1e-7 level.) Parameters ---------- D : float The Kuiper test score. N : float The effective sample size. Returns ------- fpp : float The probability of a score this large arising from the null hypothesis. Notes ----- Eq 7 of Paltani 2004 appears to incorrectly quote the original formula (Stephens 1965). This function implements the original formula, as it produces a result closer to Monte Carlo simulations. References ---------- .. [1] Paltani, S., "Searching for periods in X-ray observations using Kuiper's test. Application to the ROSAT PSPC archive", Astronomy and Astrophysics, v.240, p.789-790, 2004. .. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution and significance points", Biometrika, v.52, p.309, 1965. """ try: from scipy.special import comb, factorial except ImportError: # Retained for backwards compatibility with older versions of scipy # (factorial appears to have moved here in 0.14) from scipy.misc import comb, factorial if D < 0.0 or D > 2.0: raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test") if D < 2.0 / N: return 1.0 - factorial(N) * (D - 1.0 / N) ** (N - 1) elif D < 3.0 / N: k = -(N * D - 1.0) / 2.0 r = np.sqrt(k**2 - (N * D - 2.0) ** 2 / 2.0) a, b = -k + r, -k - r return 1 - ( factorial(N - 1) * (b ** (N - 1) * (1 - a) - a ** (N - 1) * (1 - b)) / N ** (N - 2) / (b - a) ) elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.0) / (2.0 * N) and N % 2 == 1): # NOTE: the upper limit of this sum is taken from Stephens 1965 t = np.arange(np.floor(N * (1 - D)) + 1) y = D + t / N Tt = y ** (t - 3) * ( y**3 * N - y**2 * t * (3 - 2 / N) + y * t * (t - 1) * (3 - 2 / N) / N - t * (t - 1) * (t - 2) / N**2 ) term1 = comb(N, t) term2 = (1 - D - t / N) ** (N - t - 1) # term1 is formally finite, but is approximated by numpy as np.inf for # large values, so we set them to zero manually when they would be # multiplied by zero anyway term1[(term1 == np.inf) & (term2 == 0)] = 0.0 final_term = Tt * term1 * term2 return final_term.sum() else: z = D * np.sqrt(N) # When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2) # underflows. Cutting off just before avoids triggering a (pointless) # underflow warning if `under="warn"`. 
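# Large-N asymptotic series: leading sum S1 minus a 1/sqrt(N) correction
# proportional to S2 (note D = z / sqrt(N), so the prefactor 8*D/3 below
# equals 8*z / (3*sqrt(N))).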
ms = np.arange(1, 18.82 / z) S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum() S2 = ( ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2) ).sum() return S1 - 8 * D / 3 * S2 def kuiper(data, cdf=lambda x: x, args=()): """Compute the Kuiper statistic. Use the Kuiper statistic version of the Kolmogorov-Smirnov test to find the probability that a sample like ``data`` was drawn from the distribution whose CDF is given as ``cdf``. .. warning:: This will not work correctly for distributions that are actually discrete (Poisson, for example). Parameters ---------- data : array-like The data values. cdf : callable A callable to evaluate the CDF of the distribution being tested against. Will be called with a vector of all values at once. The default is a uniform distribution. args : list-like, optional Additional arguments to be supplied to cdf. Returns ------- D : float The raw statistic. fpp : float The probability of a D this large arising with a sample drawn from the distribution whose CDF is cdf. Notes ----- The Kuiper statistic resembles the Kolmogorov-Smirnov test in that it is nonparametric and invariant under reparameterizations of the data. The Kuiper statistic, in addition, is equally sensitive throughout the domain, and it is also invariant under cyclic permutations (making it particularly appropriate for analyzing circular data). Returns (D, fpp), where D is the Kuiper D number and fpp is the probability that a value as large as D would occur if data was drawn from cdf. .. warning:: The fpp is calculated only approximately, and it can be as much as 1.5 times the true value. Stephens 1970 claims this is more effective than the KS at detecting changes in the variance of a distribution; the KS is (he claims) more sensitive at detecting changes in the mean. If cdf was obtained from data by fitting, then fpp is not correct and it will be necessary to do Monte Carlo simulations to interpret D. D should normally be independent of the shape of CDF. References ---------- .. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises and Related Statistics Without Extensive Tables", Journal of the Royal Statistical Society. Series B (Methodological), Vol. 32, No. 1. (1970), pp. 115-122. """ data = np.sort(data) cdfv = cdf(data, *args) N = len(data) D = np.amax(cdfv - np.arange(N) / float(N)) + np.amax( (np.arange(N) + 1) / float(N) - cdfv ) return D, kuiper_false_positive_probability(D, N) def kuiper_two(data1, data2): """Compute the Kuiper statistic to compare two samples. Parameters ---------- data1 : array-like The first set of data values. data2 : array-like The second set of data values. Returns ------- D : float The raw test statistic. fpp : float The probability of obtaining two samples this different from the same distribution. .. warning:: The fpp is quite approximate, especially for small samples. """ data1 = np.sort(data1) data2 = np.sort(data2) (n1,) = data1.shape (n2,) = data2.shape common_type = np.find_common_type([], [data1.dtype, data2.dtype]) if not ( np.issubdtype(common_type, np.number) and not np.issubdtype(common_type, np.complexfloating) ): raise ValueError("kuiper_two only accepts real inputs") # nans, if any, are at the end after sorting. 
if np.isnan(data1[-1]) or np.isnan(data2[-1]): raise ValueError("kuiper_two only accepts non-nan inputs") D = _stats.ks_2samp(np.asarray(data1, common_type), np.asarray(data2, common_type)) Ne = len(data1) * len(data2) / float(len(data1) + len(data2)) return D, kuiper_false_positive_probability(D, Ne) def fold_intervals(intervals): """Fold the weighted intervals to the interval (0,1). Convert a list of intervals (ai, bi, wi) to a list of non-overlapping intervals covering (0,1). Each output interval has a weight equal to the sum of the wis of all the intervals that include it. All intervals are interpreted modulo 1, and weights are accumulated counting multiplicity. This is appropriate, for example, if you have one or more blocks of observation and you want to determine how much observation time was spent on different parts of a system's orbit (the blocks should be converted to units of the orbital period first). Parameters ---------- intervals : list of (3,) tuple For each tuple (ai,bi,wi); ai and bi are the limits of the interval, and wi is the weight to apply to the interval. Returns ------- breaks : (N,) array of float The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and breaks[-1] = 1 weights : (N-1,) array of float The ith element is the sum of number of times the interval breaks[i],breaks[i+1] is included in each interval times the weight associated with that interval. """ r = [] breaks = set() tot = 0 for a, b, wt in intervals: tot += (np.ceil(b) - np.floor(a)) * wt fa = a % 1 breaks.add(fa) r.append((0, fa, -wt)) fb = b % 1 breaks.add(fb) r.append((fb, 1, -wt)) breaks.add(0.0) breaks.add(1.0) breaks = sorted(breaks) breaks_map = {f: i for (i, f) in enumerate(breaks)} totals = np.zeros(len(breaks) - 1) totals += tot for a, b, wt in r: totals[breaks_map[a] : breaks_map[b]] += wt return np.array(breaks), totals def cdf_from_intervals(breaks, totals): """Construct a callable piecewise-linear CDF from a pair of arrays. Take a pair of arrays in the format returned by fold_intervals and make a callable cumulative distribution function on the interval (0,1). Parameters ---------- breaks : (N,) array of float The boundaries of successive intervals. totals : (N-1,) array of float The weight for each interval. Returns ------- f : callable A cumulative distribution function corresponding to the piecewise-constant probability distribution given by breaks, weights """ if breaks[0] != 0 or breaks[-1] != 1: raise ValueError("Intervals must be restricted to [0,1]") if np.any(np.diff(breaks) <= 0): raise ValueError("Breaks must be strictly increasing") if np.any(totals < 0): raise ValueError("Total weights in each subinterval must be nonnegative") if np.all(totals == 0): raise ValueError("At least one interval must have positive exposure") b = breaks.copy() c = np.concatenate(((0,), np.cumsum(totals * np.diff(b)))) c /= c[-1] return lambda x: np.interp(x, b, c, 0, 1) def interval_overlap_length(i1, i2): """Compute the length of overlap of two intervals. Parameters ---------- i1, i2 : (float, float) The two intervals, (interval 1, interval 2). Returns ------- l : float The length of the overlap between the two intervals. """ (a, b) = i1 (c, d) = i2 if a < c: if b < c: return 0.0 elif b < d: return b - c else: return d - c elif a < d: if b < d: return b - a else: return d - a else: return 0 def histogram_intervals(n, breaks, totals): """Histogram of a piecewise-constant weight function. 
This function takes a piecewise-constant weight function and computes the average weight in each histogram bin. Parameters ---------- n : int The number of bins breaks : (N,) array of float Endpoints of the intervals in the PDF totals : (N-1,) array of float Probability densities in each bin Returns ------- h : array of float The average weight for each bin """ h = np.zeros(n) start = breaks[0] for i in range(len(totals)): end = breaks[i + 1] for j in range(n): ol = interval_overlap_length((float(j) / n, float(j + 1) / n), (start, end)) h[j] += ol / (1.0 / n) * totals[i] start = end return h
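# A minimal usage sketch for the interval helpers above (illustrative only,
# not part of the library API or its tests): fold a single observing block
# covering phases 0.8-1.3 with weight 1, then average it into two phase bins.
#
#     >>> breaks, weights = fold_intervals([(0.8, 1.3, 1.0)])
#     >>> histogram_intervals(2, breaks, weights)  # roughly [0.6, 0.4]
#
# Phases 0.0-0.3 and 0.8-1.0 are each covered once, so the first half-phase
# bin is covered ~60% of the time and the second ~40%.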
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Methods for selecting the bin width of histograms. Ported from the astroML project: https://www.astroml.org/ """ import numpy as np from .bayesian_blocks import bayesian_blocks __all__ = [ "histogram", "scott_bin_width", "freedman_bin_width", "knuth_bin_width", "calculate_bin_edges", ] def calculate_bin_edges(a, bins=10, range=None, weights=None): """ Calculate histogram bin edges like ``numpy.histogram_bin_edges``. Parameters ---------- a : array-like Input data. The bin edges are calculated over the flattened array. bins : int, list, or str, optional If ``bins`` is an int, it is the number of bins. If it is a list it is taken to be the bin edges. If it is a string, it must be one of 'blocks', 'knuth', 'scott' or 'freedman'. See `~astropy.stats.histogram` for a description of each method. range : tuple or None, optional The minimum and maximum range for the histogram. If not specified, it will be (a.min(), a.max()). However, if bins is a list it is returned unmodified regardless of the range argument. weights : array-like, optional An array the same shape as ``a``. If given, the histogram accumulates the value of the weight corresponding to ``a`` instead of returning the count of values. This argument does not affect determination of bin edges, though they may be used in the future as new methods are added. """ # if range is specified, we need to truncate the data for # the bin-finding routines if range is not None: a = a[(a >= range[0]) & (a <= range[1])] # if bins is a string, first compute bin edges with the desired heuristic if isinstance(bins, str): a = np.asarray(a).ravel() # TODO: if weights is specified, we need to modify things. # e.g. we could use point measures fitness for Bayesian blocks if weights is not None: raise NotImplementedError( "weights are not yet supported for the enhanced histogram" ) if bins == "blocks": bins = bayesian_blocks(a) elif bins == "knuth": da, bins = knuth_bin_width(a, True) elif bins == "scott": da, bins = scott_bin_width(a, True) elif bins == "freedman": da, bins = freedman_bin_width(a, True) else: raise ValueError(f"unrecognized bin code: '{bins}'") if range: # Check that the upper and lower edges are what was requested. # The current implementation of the bin width estimators does not # guarantee this, it only ensures that data outside the range is # excluded from calculation of the bin widths. if bins[0] != range[0]: bins[0] = range[0] if bins[-1] != range[1]: bins[-1] = range[1] elif np.ndim(bins) == 0: # Number of bins was given bins = np.histogram_bin_edges(a, bins, range=range, weights=weights) return bins def histogram(a, bins=10, range=None, weights=None, **kwargs): """Enhanced histogram function, providing adaptive binnings. This is a histogram function that enables the use of more sophisticated algorithms for determining bins. Aside from the ``bins`` argument allowing a string specified how bins are computed, the parameters are the same as ``numpy.histogram()``. Parameters ---------- a : array-like array of data to be histogrammed bins : int, list, or str, optional If bins is a string, then it must be one of: - 'blocks' : use bayesian blocks for dynamic bin widths - 'knuth' : use Knuth's rule to determine bins - 'scott' : use Scott's rule to determine bins - 'freedman' : use the Freedman-Diaconis rule to determine bins range : tuple or None, optional the minimum and maximum range for the histogram. 
If not specified, it will be (x.min(), x.max()) weights : array-like, optional An array the same shape as ``a``. If given, the histogram accumulates the value of the weight corresponding to ``a`` instead of returning the count of values. This argument does not affect determination of bin edges. other keyword arguments are described in numpy.histogram(). Returns ------- hist : array The values of the histogram. See ``density`` and ``weights`` for a description of the possible semantics. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. See Also -------- numpy.histogram """ bins = calculate_bin_edges(a, bins=bins, range=range, weights=weights) # Now we call numpy's histogram with the resulting bin edges return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs) def scott_bin_width(data, return_bins=False): r"""Return the optimal histogram bin width using Scott's rule. Scott's rule is a normal reference rule: it minimizes the integrated mean squared error in the bin approximation under the assumption that the data is approximately Gaussian. Parameters ---------- data : array-like, ndim=1 observed (one-dimensional) data return_bins : bool, optional if True, then return the bin edges Returns ------- width : float optimal bin width using Scott's rule bins : ndarray bin edges: returned if ``return_bins`` is True Notes ----- The optimal bin width is .. math:: \Delta_b = \frac{3.5\sigma}{n^{1/3}} where :math:`\sigma` is the standard deviation of the data, and :math:`n` is the number of data points [1]_. References ---------- .. [1] Scott, David W. (1979). "On optimal and data-based histograms". Biometricka 66 (3): 605-610 See Also -------- knuth_bin_width freedman_bin_width bayesian_blocks histogram """ data = np.asarray(data) if data.ndim != 1: raise ValueError("data should be one-dimensional") n = data.size sigma = np.std(data) dx = 3.5 * sigma / (n ** (1 / 3)) if return_bins: Nbins = np.ceil((data.max() - data.min()) / dx) Nbins = max(1, Nbins) bins = data.min() + dx * np.arange(Nbins + 1) return dx, bins else: return dx def freedman_bin_width(data, return_bins=False): r"""Return the optimal histogram bin width using the Freedman-Diaconis rule. The Freedman-Diaconis rule is a normal reference rule like Scott's rule, but uses rank-based statistics for results which are more robust to deviations from a normal distribution. Parameters ---------- data : array-like, ndim=1 observed (one-dimensional) data return_bins : bool, optional if True, then return the bin edges Returns ------- width : float optimal bin width using the Freedman-Diaconis rule bins : ndarray bin edges: returned if ``return_bins`` is True Notes ----- The optimal bin width is .. math:: \Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}} where :math:`q_{N}` is the :math:`N` percent quartile of the data, and :math:`n` is the number of data points [1]_. References ---------- .. [1] D. Freedman & P. Diaconis (1981) "On the histogram as a density estimator: L2 theory". 
Probability Theory and Related Fields 57 (4): 453-476 See Also -------- knuth_bin_width scott_bin_width bayesian_blocks histogram """ data = np.asarray(data) if data.ndim != 1: raise ValueError("data should be one-dimensional") n = data.size if n < 4: raise ValueError("data should have more than three entries") v25, v75 = np.percentile(data, [25, 75]) dx = 2 * (v75 - v25) / (n ** (1 / 3)) if return_bins: dmin, dmax = data.min(), data.max() Nbins = max(1, np.ceil((dmax - dmin) / dx)) try: bins = dmin + dx * np.arange(Nbins + 1) except ValueError as e: if "Maximum allowed size exceeded" in str(e): raise ValueError( "The inter-quartile range of the data is too small: " f"failed to construct histogram with {Nbins + 1} bins. " "Please use another bin method, such as " 'bins="scott"' ) else: # Something else # pragma: no cover raise return dx, bins else: return dx def knuth_bin_width(data, return_bins=False, quiet=True): r"""Return the optimal histogram bin width using Knuth's rule. Knuth's rule is a fixed-width, Bayesian approach to determining the optimal bin width of a histogram. Parameters ---------- data : array-like, ndim=1 observed (one-dimensional) data return_bins : bool, optional if True, then return the bin edges quiet : bool, optional if True (default) then suppress stdout output from scipy.optimize Returns ------- dx : float optimal bin width. Bins are measured starting at the first data point. bins : ndarray bin edges: returned if ``return_bins`` is True Notes ----- The optimal number of bins is the value M which maximizes the function .. math:: F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2}) - M\log\Gamma(\frac{1}{2}) - \log\Gamma(\frac{2n+M}{2}) + \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2}) where :math:`\Gamma` is the Gamma function, :math:`n` is the number of data points, :math:`n_k` is the number of measurements in bin :math:`k` [1]_. References ---------- .. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms". arXiv:0605197, 2006 See Also -------- freedman_bin_width scott_bin_width bayesian_blocks histogram """ # import here because of optional scipy dependency from scipy import optimize knuthF = _KnuthF(data) dx0, bins0 = freedman_bin_width(data, True) M = optimize.fmin(knuthF, len(bins0), disp=not quiet)[0] bins = knuthF.bins(M) dx = bins[1] - bins[0] if return_bins: return dx, bins else: return dx class _KnuthF: r"""Class which implements the function minimized by knuth_bin_width. Parameters ---------- data : array-like, one dimension data to be histogrammed Notes ----- the function F is given by .. math:: F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2}) - M\log\Gamma(\frac{1}{2}) - \log\Gamma(\frac{2n+M}{2}) + \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2}) where :math:`\Gamma` is the Gamma function, :math:`n` is the number of data points, :math:`n_k` is the number of measurements in bin :math:`k`. See Also -------- knuth_bin_width """ def __init__(self, data): self.data = np.array(data, copy=True) if self.data.ndim != 1: raise ValueError("data should be 1-dimensional") self.data.sort() self.n = self.data.size # import here rather than globally: scipy is an optional dependency. # Note that scipy is imported in the function which calls this, # so there shouldn't be any issue importing here. 
from scipy import special # create a reference to gammaln to use in self.eval() self.gammaln = special.gammaln def bins(self, M): """Return the bin edges given M number of bins.""" return np.linspace(self.data[0], self.data[-1], int(M) + 1) def __call__(self, M): return self.eval(M) def eval(self, M): """Evaluate the Knuth function. Parameters ---------- M : int Number of bins Returns ------- F : float evaluation of the negative Knuth loglikelihood function: smaller values indicate a better fit. """ M = int(M) if M <= 0: return np.inf bins = self.bins(M) nk, bins = np.histogram(self.data, bins) return -( self.n * np.log(M) + self.gammaln(0.5 * M) - M * self.gammaln(0.5) - self.gammaln(self.n + 0.5 * M) + np.sum(self.gammaln(nk + 0.5)) )
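# Minimal usage sketch for the bin-width rules defined above (illustrative
# only; the variable names are placeholders): each rule can be used directly
# or through the ``bins=`` string argument of ``histogram``.
#
#     >>> rng = np.random.default_rng(0)
#     >>> x = rng.normal(size=1000)
#     >>> dx, edges = knuth_bin_width(x, return_bins=True)
#     >>> counts, edges = histogram(x, bins="freedman")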
# Licensed under a 3-clause BSD style license - see LICENSE.rst from os.path import abspath, dirname, join import astropy.config as _config import astropy.io.registry as io_registry from astropy import extern from .table import Table class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy.table.jsviewer`. """ jquery_url = _config.ConfigItem( "https://code.jquery.com/jquery-3.6.0.min.js", "The URL to the jquery library." ) datatables_url = _config.ConfigItem( "https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js", "The URL to the jquery datatables library.", ) css_urls = _config.ConfigItem( ["https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css"], "The URLs to the css file(s) to include.", cfgtype="string_list", ) conf = Conf() EXTERN_JS_DIR = abspath(join(dirname(extern.__file__), "jquery", "data", "js")) EXTERN_CSS_DIR = abspath(join(dirname(extern.__file__), "jquery", "data", "css")) _SORTING_SCRIPT_PART_1 = """ var astropy_sort_num = function(a, b) {{ var a_num = parseFloat(a); var b_num = parseFloat(b); if (isNaN(a_num) && isNaN(b_num)) return ((a < b) ? -1 : ((a > b) ? 1 : 0)); else if (!isNaN(a_num) && !isNaN(b_num)) return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0)); else return isNaN(a_num) ? -1 : 1; }} """ _SORTING_SCRIPT_PART_2 = """ jQuery.extend( jQuery.fn.dataTableExt.oSort, {{ "optionalnum-asc": astropy_sort_num, "optionalnum-desc": function (a,b) {{ return -astropy_sort_num(a, b); }} }}); """ IPYNB_JS_SCRIPT = """ <script> %(sorting_script1)s require.config({{paths: {{ datatables: '{datatables_url}' }}}}); require(["datatables"], function(){{ console.log("$('#{tid}').dataTable()"); %(sorting_script2)s $('#{tid}').dataTable({{ order: [], pageLength: {display_length}, lengthMenu: {display_length_menu}, pagingType: "full_numbers", columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}] }}); }}); </script> """ % dict( # noqa: UP031 sorting_script1=_SORTING_SCRIPT_PART_1, sorting_script2=_SORTING_SCRIPT_PART_2 ) HTML_JS_SCRIPT = ( _SORTING_SCRIPT_PART_1 + _SORTING_SCRIPT_PART_2 + """ $(document).ready(function() {{ $('#{tid}').dataTable({{ order: [], pageLength: {display_length}, lengthMenu: {display_length_menu}, pagingType: "full_numbers", columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}] }}); }} ); """ ) # Default CSS for the JSViewer writer DEFAULT_CSS = """\ body {font-family: sans-serif;} table.dataTable {width: auto !important; margin: 0 !important;} .dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em} """ # Default CSS used when rendering a table in the IPython notebook DEFAULT_CSS_NB = """\ table.dataTable {clear: both; width: auto !important; margin: 0 !important;} .dataTables_info, .dataTables_length, .dataTables_filter, .dataTables_paginate{ display: inline-block; margin-right: 1em; } .paginate_button { margin-right: 5px; } """ class JSViewer: """Provides an interactive HTML export of a Table. This class provides an interface to the `DataTables <https://datatables.net/>`_ library, which allow to visualize interactively an HTML table. It is used by the `~astropy.table.Table.show_in_browser` method. Parameters ---------- use_local_files : bool, optional Use local files or a CDN for JavaScript libraries. Default False. display_length : int, optional Number or rows to show. Default to 50. 
""" def __init__(self, use_local_files=False, display_length=50): self._use_local_files = use_local_files self.display_length_menu = [ [10, 25, 50, 100, 500, 1000, -1], [10, 25, 50, 100, 500, 1000, "All"], ] self.display_length = display_length for L in self.display_length_menu: if display_length not in L: L.insert(0, display_length) @property def jquery_urls(self): if self._use_local_files: return [ "file://" + join(EXTERN_JS_DIR, "jquery-3.6.0.min.js"), "file://" + join(EXTERN_JS_DIR, "jquery.dataTables.min.js"), ] else: return [conf.jquery_url, conf.datatables_url] @property def css_urls(self): if self._use_local_files: return ["file://" + join(EXTERN_CSS_DIR, "jquery.dataTables.css")] else: return conf.css_urls def _jstable_file(self): if self._use_local_files: return "file://" + join(EXTERN_JS_DIR, "jquery.dataTables.min") else: return conf.datatables_url[:-3] def ipynb(self, table_id, css=None, sort_columns="[]"): html = f"<style>{css if css is not None else DEFAULT_CSS_NB}</style>" html += IPYNB_JS_SCRIPT.format( display_length=self.display_length, display_length_menu=self.display_length_menu, datatables_url=self._jstable_file(), tid=table_id, sort_columns=sort_columns, ) return html def html_js(self, table_id="table0", sort_columns="[]"): return HTML_JS_SCRIPT.format( display_length=self.display_length, display_length_menu=self.display_length_menu, tid=table_id, sort_columns=sort_columns, ).strip() def write_table_jsviewer( table, filename, table_id=None, max_lines=5000, table_class="display compact", jskwargs=None, css=DEFAULT_CSS, htmldict=None, overwrite=False, ): if table_id is None: table_id = f"table{id(table)}" jskwargs = jskwargs or {} jsv = JSViewer(**jskwargs) sortable_columns = [ i for i, col in enumerate(table.columns.values()) if col.info.dtype.kind in "iufc" ] html_options = { "table_id": table_id, "table_class": table_class, "css": css, "cssfiles": jsv.css_urls, "jsfiles": jsv.jquery_urls, "js": jsv.html_js(table_id=table_id, sort_columns=sortable_columns), } if htmldict: html_options.update(htmldict) if max_lines < len(table): table = table[:max_lines] table.write(filename, format="html", htmldict=html_options, overwrite=overwrite) io_registry.register_writer("jsviewer", Table, write_table_jsviewer)
# Licensed under a 3-clause BSD style license - see LICENSE.rst import itertools import sys import types import warnings import weakref from collections import OrderedDict, defaultdict from collections.abc import Mapping from copy import deepcopy import numpy as np from numpy import ma from astropy import log from astropy.io.registry import UnifiedReadWriteMethod from astropy.units import Quantity, QuantityInfo from astropy.utils import ShapedLikeNDArray, isiterable from astropy.utils.console import color_print from astropy.utils.data_info import BaseColumnInfo, DataInfo, MixinInfo from astropy.utils.decorators import format_doc from astropy.utils.exceptions import AstropyUserWarning from astropy.utils.masked import Masked from astropy.utils.metadata import MetaAttribute, MetaData from . import conf, groups from .column import ( BaseColumn, Column, FalseArray, MaskedColumn, _auto_names, _convert_sequence_data_to_array, col_copy, ) from .connect import TableRead, TableWrite from .index import ( Index, SlicedIndex, TableILoc, TableIndices, TableLoc, TableLocIndices, _IndexModeContext, get_index, ) from .info import TableInfo from .mixins.registry import get_mixin_handler from .ndarray_mixin import NdarrayMixin # noqa: F401 from .pprint import TableFormatter from .row import Row _implementation_notes = """ This string has informal notes concerning Table implementation for developers. Things to remember: - Table has customizable attributes ColumnClass, Column, MaskedColumn. Table.Column is normally just column.Column (same w/ MaskedColumn) but in theory they can be different. Table.ColumnClass is the default class used to create new non-mixin columns, and this is a function of the Table.masked attribute. Column creation / manipulation in a Table needs to respect these. - Column objects that get inserted into the Table.columns attribute must have the info.parent_table attribute set correctly. Beware just dropping an object into the columns dict since an existing column may be part of another Table and have parent_table set to point at that table. Dropping that column into `columns` of this Table will cause a problem for the old one so the column object needs to be copied (but not necessarily the data). Currently replace_column is always making a copy of both object and data if parent_table is set. This could be improved but requires a generic way to copy a mixin object but not the data. - Be aware of column objects that have indices set. - `cls.ColumnClass` is a property that effectively uses the `masked` attribute to choose either `cls.Column` or `cls.MaskedColumn`. """ __doctest_skip__ = [ "Table.read", "Table.write", "Table._read", "Table.convert_bytestring_to_unicode", "Table.convert_unicode_to_bytestring", ] __doctest_requires__ = {"*pandas": ["pandas>=1.1"]} _pprint_docs = """ {__doc__} Parameters ---------- max_lines : int or None Maximum number of lines in table output. max_width : int or None Maximum character width of output. show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is False. align : str or list or tuple or None Left/right alignment of columns. Default is right (None) for all columns. Other allowed values are '>', '<', '^', and '0=' for right, left, centered, and 0-padded, respectively. 
A list of strings can be provided for alignment of tables with multiple columns. """ _pformat_docs = """ {__doc__} Parameters ---------- max_lines : int or None Maximum number of rows to output max_width : int or None Maximum character width of output show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is True. html : bool Format the output as an HTML table. Default is False. tableid : str or None An ID tag for the table; only used if html is set. Default is "table{id}", where id is the unique integer id of the table object, id(self) align : str or list or tuple or None Left/right alignment of columns. Default is right (None) for all columns. Other allowed values are '>', '<', '^', and '0=' for right, left, centered, and 0-padded, respectively. A list of strings can be provided for alignment of tables with multiple columns. tableclass : str or list of str or None CSS classes for the table; only used if html is set. Default is None. Returns ------- lines : list Formatted table as a list of strings. """ class TableReplaceWarning(UserWarning): """ Warning class for cases when a table column is replaced via the Table.__setitem__ syntax e.g. t['a'] = val. This does not inherit from AstropyWarning because we want to use stacklevel=3 to show the user where the issue occurred in their code. """ pass def descr(col): """Array-interface compliant full description of a column. This returns a 3-tuple (name, type, shape) that can always be used in a structured array dtype definition. """ col_dtype = "O" if (col.info.dtype is None) else col.info.dtype col_shape = col.shape[1:] if hasattr(col, "shape") else () return (col.info.name, col_dtype, col_shape) def has_info_class(obj, cls): """Check if the object's info is an instance of cls.""" # We check info on the class of the instance, since on the instance # itself accessing 'info' has side effects in that it sets # obj.__dict__['info'] if it does not exist already. return isinstance(getattr(obj.__class__, "info", None), cls) def _get_names_from_list_of_dict(rows): """Return list of column names if ``rows`` is a list of dict that defines table data. If rows is not a list of dict then return None. """ if rows is None: return None names = set() for row in rows: if not isinstance(row, Mapping): return None names.update(row) return list(names) # Note to future maintainers: when transitioning this to dict # be sure to change the OrderedDict ref(s) in Row and in __len__(). class TableColumns(OrderedDict): """OrderedDict subclass for a set of columns. This class enhances item access to provide convenient access to columns by name or index, including slice access. It also handles renaming of columns. The initialization argument ``cols`` can be a list of ``Column`` objects or any structure that is valid for initializing a Python dict. This includes a dict, list of (key, val) tuples or [key, val] lists, etc. Parameters ---------- cols : dict, list, tuple; optional Column objects as data structure that can init dict (see above) """ def __init__(self, cols={}): if isinstance(cols, (list, tuple)): # `cols` should be a list of two-tuples, but it is allowed to have # columns (BaseColumn or mixins) in the list. 
newcols = [] for col in cols: if has_info_class(col, BaseColumnInfo): newcols.append((col.info.name, col)) else: newcols.append(col) cols = newcols super().__init__(cols) def __getitem__(self, item): """Get items from a TableColumns object. :: tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')]) tc['a'] # Column('a') tc[1] # Column('b') tc['a', 'b'] # <TableColumns names=('a', 'b')> tc[1:3] # <TableColumns names=('b', 'c')> """ if isinstance(item, str): return OrderedDict.__getitem__(self, item) elif isinstance(item, (int, np.integer)): return list(self.values())[item] elif ( isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i" ): return list(self.values())[item.item()] elif isinstance(item, tuple): return self.__class__([self[x] for x in item]) elif isinstance(item, slice): return self.__class__([self[x] for x in list(self)[item]]) else: raise IndexError( f"Illegal key or index value for {type(self).__name__} object" ) def __setitem__(self, item, value, validated=False): """ Set item in this dict instance, but do not allow directly replacing an existing column unless it is already validated (and thus is certain to not corrupt the table). NOTE: it is easily possible to corrupt a table by directly *adding* a new key to the TableColumns attribute of a Table, e.g. ``t.columns['jane'] = 'doe'``. """ if item in self and not validated: raise ValueError( f"Cannot replace column '{item}'. Use Table.replace_column() instead." ) super().__setitem__(item, value) def __repr__(self): names = (f"'{x}'" for x in self.keys()) return f"<{self.__class__.__name__} names=({','.join(names)})>" def _rename_column(self, name, new_name): if name == new_name: return if new_name in self: raise KeyError(f"Column {new_name} already exists") # Rename column names in pprint include/exclude attributes as needed parent_table = self[name].info.parent_table if parent_table is not None: parent_table.pprint_exclude_names._rename(name, new_name) parent_table.pprint_include_names._rename(name, new_name) mapper = {name: new_name} new_names = [mapper.get(name, name) for name in self] cols = list(self.values()) self.clear() self.update(list(zip(new_names, cols))) def __delitem__(self, name): # Remove column names from pprint include/exclude attributes as needed. # __delitem__ also gets called for pop() and popitem(). parent_table = self[name].info.parent_table if parent_table is not None: # _remove() method does not require that `name` is in the attribute parent_table.pprint_exclude_names._remove(name) parent_table.pprint_include_names._remove(name) return super().__delitem__(name) def isinstance(self, cls): """ Return a list of columns which are instances of the specified classes. Parameters ---------- cls : class or tuple thereof Column class (including mixin) or tuple of Column classes. Returns ------- col_list : list of `Column` List of Column objects which are instances of given classes. """ cols = [col for col in self.values() if isinstance(col, cls)] return cols def not_isinstance(self, cls): """ Return a list of columns which are not instances of the specified classes. Parameters ---------- cls : class or tuple thereof Column class (including mixin) or tuple of Column classes. Returns ------- col_list : list of `Column` List of Column objects which are not instances of given classes. """ cols = [col for col in self.values() if not isinstance(col, cls)] return cols class TableAttribute(MetaAttribute): """ Descriptor to define a custom attribute for a Table subclass. 
The value of the ``TableAttribute`` will be stored in a dict named ``__attributes__`` that is stored in the table ``meta``. The attribute can be accessed and set in the usual way, and it can be provided when creating the object. Defining an attribute by this mechanism ensures that it will persist if the table is sliced or serialized, for example as a pickle or ECSV file. See the `~astropy.utils.metadata.MetaAttribute` documentation for additional details. Parameters ---------- default : object Default value for attribute Examples -------- >>> from astropy.table import Table, TableAttribute >>> class MyTable(Table): ... identifier = TableAttribute(default=1) >>> t = MyTable(identifier=10) >>> t.identifier 10 >>> t.meta OrderedDict([('__attributes__', {'identifier': 10})]) """ class PprintIncludeExclude(TableAttribute): """Maintain tuple that controls table column visibility for print output. This is a descriptor that inherits from MetaAttribute so that the attribute value is stored in the table meta['__attributes__']. This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table attributes. """ def __get__(self, instance, owner_cls): """Get the attribute. This normally returns an instance of this class which is stored on the owner object. """ # For getting from class not an instance if instance is None: return self # If not already stored on `instance`, make a copy of the class # descriptor object and put it onto the instance. value = instance.__dict__.get(self.name) if value is None: value = deepcopy(self) instance.__dict__[self.name] = value # We set _instance_ref on every call, since if one makes copies of # instances, this attribute will be copied as well, which will lose the # reference. value._instance_ref = weakref.ref(instance) return value def __set__(self, instance, names): """Set value of ``instance`` attribute to ``names``. Parameters ---------- instance : object Instance that owns the attribute names : None, str, list, tuple Column name(s) to store, or None to clear """ if isinstance(names, str): names = [names] if names is None: # Remove attribute value from the meta['__attributes__'] dict. # Subsequent access will just return None. delattr(instance, self.name) else: # This stores names into instance.meta['__attributes__'] as tuple return super().__set__(instance, tuple(names)) def __call__(self): """Get the value of the attribute. Returns ------- names : None, tuple Include/exclude names """ # Get the value from instance.meta['__attributes__'] instance = self._instance_ref() return super().__get__(instance, instance.__class__) def __repr__(self): if hasattr(self, "_instance_ref"): out = f"<{self.__class__.__name__} name={self.name} value={self()}>" else: out = super().__repr__() return out def _add_remove_setup(self, names): """Common setup for add and remove. - Coerce attribute value to a list - Coerce names into a list - Get the parent table instance """ names = [names] if isinstance(names, str) else list(names) # Get the value. This is the same as self() but we need `instance` here. instance = self._instance_ref() value = super().__get__(instance, instance.__class__) value = [] if value is None else list(value) return instance, names, value def add(self, names): """Add ``names`` to the include/exclude attribute. 
Parameters ---------- names : str, list, tuple Column name(s) to add """ instance, names, value = self._add_remove_setup(names) value.extend(name for name in names if name not in value) super().__set__(instance, tuple(value)) def remove(self, names): """Remove ``names`` from the include/exclude attribute. Parameters ---------- names : str, list, tuple Column name(s) to remove """ self._remove(names, raise_exc=True) def _remove(self, names, raise_exc=False): """Remove ``names`` with optional checking if they exist.""" instance, names, value = self._add_remove_setup(names) # Return now if there are no attributes and thus no action to be taken. if not raise_exc and "__attributes__" not in instance.meta: return # Remove one by one, optionally raising an exception if name is missing. for name in names: if name in value: value.remove(name) # Using the list.remove method elif raise_exc: raise ValueError(f"{name} not in {self.name}") # Change to either None or a tuple for storing back to attribute value = None if value == [] else tuple(value) self.__set__(instance, value) def _rename(self, name, new_name): """Rename ``name`` to ``new_name`` if ``name`` is in the list.""" names = self() or () if name in names: new_names = list(names) new_names[new_names.index(name)] = new_name self.set(new_names) def set(self, names): """Set value of include/exclude attribute to ``names``. Parameters ---------- names : None, str, list, tuple Column name(s) to store, or None to clear """ class _Context: def __init__(self, descriptor_self): self.descriptor_self = descriptor_self self.names_orig = descriptor_self() def __enter__(self): pass def __exit__(self, type, value, tb): descriptor_self = self.descriptor_self instance = descriptor_self._instance_ref() descriptor_self.__set__(instance, self.names_orig) def __repr__(self): return repr(self.descriptor_self) ctx = _Context(descriptor_self=self) instance = self._instance_ref() self.__set__(instance, names) return ctx class Table: """A class to represent tables of heterogeneous data. `~astropy.table.Table` provides a class for heterogeneous tabular data. A key enhancement provided by the `~astropy.table.Table` class over e.g. a `numpy` structured array is the ability to easily modify the structure of the table by adding or removing columns, or adding new rows of data. In addition table and column metadata are fully supported. `~astropy.table.Table` differs from `~astropy.nddata.NDData` by the assumption that the input data consists of columns of homogeneous data, where each column has a unique identifier and may contain additional metadata such as the data unit, format, and description. See also: https://docs.astropy.org/en/stable/table/ Parameters ---------- data : numpy ndarray, dict, list, table-like object, optional Data to initialize table. masked : bool, optional Specify whether the table is masked. names : list, optional Specify column names. dtype : list, optional Specify column data types. meta : dict, optional Metadata associated with the table. copy : bool, optional Copy the input data. If the input is a Table the ``meta`` is always copied regardless of the ``copy`` parameter. Default is True. rows : numpy ndarray, list of list, optional Row-oriented data for table instead of ``data`` argument. copy_indices : bool, optional Copy any indices in the input data. Default is True. units : list, dict, optional List or dict of units to apply to columns. descriptions : list, dict, optional List or dict of descriptions to apply to columns. 
**kwargs : dict, optional Additional keyword args when converting table-like object. """ meta = MetaData(copy=False) # Define class attributes for core container objects to allow for subclass # customization. Row = Row Column = Column MaskedColumn = MaskedColumn TableColumns = TableColumns TableFormatter = TableFormatter # Unified I/O read and write methods from .connect read = UnifiedReadWriteMethod(TableRead) write = UnifiedReadWriteMethod(TableWrite) pprint_exclude_names = PprintIncludeExclude() pprint_include_names = PprintIncludeExclude() def as_array(self, keep_byteorder=False, names=None): """ Return a new copy of the table in the form of a structured np.ndarray or np.ma.MaskedArray object (as appropriate). Parameters ---------- keep_byteorder : bool, optional By default the returned array has all columns in native byte order. However, if this option is `True` this preserves the byte order of all columns (if any are non-native). names : list, optional: List of column names to include for returned structured array. Default is to include all table columns. Returns ------- table_array : array or `~numpy.ma.MaskedArray` Copy of table as a numpy structured array. ndarray for unmasked or `~numpy.ma.MaskedArray` for masked. """ masked = self.masked or self.has_masked_columns or self.has_masked_values empty_init = ma.empty if masked else np.empty if len(self.columns) == 0: return empty_init(0, dtype=None) dtype = [] cols = self.columns.values() if names is not None: cols = [col for col in cols if col.info.name in names] for col in cols: col_descr = descr(col) if not (col.info.dtype.isnative or keep_byteorder): new_dt = np.dtype(col_descr[1]).newbyteorder("=") col_descr = (col_descr[0], new_dt, col_descr[2]) dtype.append(col_descr) data = empty_init(len(self), dtype=dtype) for col in cols: # When assigning from one array into a field of a structured array, # Numpy will automatically swap those columns to their destination # byte order where applicable data[col.info.name] = col # For masked out, masked mixin columns need to set output mask attribute. if masked and has_info_class(col, MixinInfo) and hasattr(col, "mask"): data[col.info.name].mask = col.mask return data def __init__( self, data=None, masked=False, names=None, dtype=None, meta=None, copy=True, rows=None, copy_indices=True, units=None, descriptions=None, **kwargs, ): # Set up a placeholder empty table self._set_masked(masked) self.columns = self.TableColumns() self.formatter = self.TableFormatter() self._copy_indices = True # copy indices from this Table by default self._init_indices = copy_indices # whether to copy indices in init self.primary_key = None # Must copy if dtype are changing if not copy and dtype is not None: raise ValueError("Cannot specify dtype when copy=False") # Specifies list of names found for the case of initializing table with # a list of dict. If data are not list of dict then this is None. names_from_list_of_dict = None # Row-oriented input, e.g. list of lists or list of tuples, list of # dict, Row instance. Set data to something that the subsequent code # will parse correctly. if rows is not None: if data is not None: raise ValueError("Cannot supply both `data` and `rows` values") if isinstance(rows, types.GeneratorType): # Without this then the all(..) 
test below uses up the generator rows = list(rows) # Get column names if `rows` is a list of dict, otherwise this is None names_from_list_of_dict = _get_names_from_list_of_dict(rows) if names_from_list_of_dict: data = rows elif isinstance(rows, self.Row): data = rows else: data = list(zip(*rows)) # Infer the type of the input data and set up the initialization # function, number of columns, and potentially the default col names default_names = None # Handle custom (subclass) table attributes that are stored in meta. # These are defined as class attributes using the TableAttribute # descriptor. Any such attributes get removed from kwargs here and # stored for use after the table is otherwise initialized. Any values # provided via kwargs will have precedence over existing values from # meta (e.g. from data as a Table or meta via kwargs). meta_table_attrs = {} if kwargs: for attr in list(kwargs): descr = getattr(self.__class__, attr, None) if isinstance(descr, TableAttribute): meta_table_attrs[attr] = kwargs.pop(attr) if hasattr(data, "__astropy_table__"): # Data object implements the __astropy_table__ interface method. # Calling that method returns an appropriate instance of # self.__class__ and respects the `copy` arg. The returned # Table object should NOT then be copied. data = data.__astropy_table__(self.__class__, copy, **kwargs) copy = False elif kwargs: raise TypeError( f"__init__() got unexpected keyword argument {list(kwargs.keys())[0]!r}" ) if isinstance(data, np.ndarray) and data.shape == (0,) and not data.dtype.names: data = None if isinstance(data, self.Row): data = data._table[data._index : data._index + 1] if isinstance(data, (list, tuple)): # Get column names from `data` if it is a list of dict, otherwise this is None. # This might be previously defined if `rows` was supplied as an init arg. names_from_list_of_dict = ( names_from_list_of_dict or _get_names_from_list_of_dict(data) ) if names_from_list_of_dict: init_func = self._init_from_list_of_dicts n_cols = len(names_from_list_of_dict) else: init_func = self._init_from_list n_cols = len(data) elif isinstance(data, np.ndarray): if data.dtype.names: init_func = self._init_from_ndarray # _struct n_cols = len(data.dtype.names) default_names = data.dtype.names else: init_func = self._init_from_ndarray # _homog if data.shape == (): raise ValueError("Can not initialize a Table with a scalar") elif len(data.shape) == 1: data = data[np.newaxis, :] n_cols = data.shape[1] elif isinstance(data, Mapping): init_func = self._init_from_dict default_names = list(data) n_cols = len(default_names) elif isinstance(data, Table): # If user-input meta is None then use data.meta (if non-trivial) if meta is None and data.meta: # At this point do NOT deepcopy data.meta as this will happen after # table init_func() is called. But for table input the table meta # gets a key copy here if copy=False because later a direct object ref # is used. meta = data.meta if copy else data.meta.copy() # Handle indices on input table. Copy primary key and don't copy indices # if the input Table is in non-copy mode. self.primary_key = data.primary_key self._init_indices = self._init_indices and data._copy_indices # Extract default names, n_cols, and then overwrite ``data`` to be the # table columns so we can use _init_from_list. default_names = data.colnames n_cols = len(default_names) data = list(data.columns.values()) init_func = self._init_from_list elif data is None: if names is None: if dtype is None: # Table was initialized as `t = Table()`. 
Set up for empty # table with names=[], data=[], and n_cols=0. # self._init_from_list() will simply return, giving the # expected empty table. names = [] else: try: # No data nor names but dtype is available. This must be # valid to initialize a structured array. dtype = np.dtype(dtype) names = dtype.names dtype = [dtype[name] for name in names] except Exception: raise ValueError( "dtype was specified but could not be " "parsed for column names" ) # names is guaranteed to be set at this point init_func = self._init_from_list n_cols = len(names) data = [[]] * n_cols else: raise ValueError(f"Data type {type(data)} not allowed to init Table") # Set up defaults if names and/or dtype are not specified. # A value of None means the actual value will be inferred # within the appropriate initialization routine, either from # existing specification or auto-generated. if dtype is None: dtype = [None] * n_cols elif isinstance(dtype, np.dtype): if default_names is None: default_names = dtype.names # Convert a numpy dtype input to a list of dtypes for later use. dtype = [dtype[name] for name in dtype.names] if names is None: names = default_names or [None] * n_cols names = [None if name is None else str(name) for name in names] self._check_names_dtype(names, dtype, n_cols) # Finally do the real initialization init_func(data, names, dtype, n_cols, copy) # Set table meta. If copy=True then deepcopy meta otherwise use the # user-supplied meta directly. if meta is not None: self.meta = deepcopy(meta) if copy else meta # Update meta with TableAttributes supplied as kwargs in Table init. # This takes precedence over previously-defined meta. if meta_table_attrs: for attr, value in meta_table_attrs.items(): setattr(self, attr, value) # Whatever happens above, the masked property should be set to a boolean if self.masked not in (None, True, False): raise TypeError("masked property must be None, True or False") self._set_column_attribute("unit", units) self._set_column_attribute("description", descriptions) def _set_column_attribute(self, attr, values): """Set ``attr`` for columns to ``values``, which can be either a dict (keyed by column name) or a dict of name: value pairs. This is used for handling the ``units`` and ``descriptions`` kwargs to ``__init__``. """ if not values: return if isinstance(values, Row): # For a Row object transform to an equivalent dict. 
values = {name: values[name] for name in values.colnames} if not isinstance(values, Mapping): # If not a dict map, assume iterable and map to dict if the right length if len(values) != len(self.columns): raise ValueError( f"sequence of {attr} values must match number of columns" ) values = dict(zip(self.colnames, values)) for name, value in values.items(): if name not in self.columns: raise ValueError( f"invalid column name {name} for setting {attr} attribute" ) # Special case: ignore unit if it is an empty or blank string if attr == "unit" and isinstance(value, str): if value.strip() == "": value = None if value not in (np.ma.masked, None): col = self[name] if attr == "unit" and isinstance(col, Quantity): # Update the Quantity unit in-place col <<= value else: setattr(col.info, attr, value) def __getstate__(self): columns = OrderedDict( (key, col if isinstance(col, BaseColumn) else col_copy(col)) for key, col in self.columns.items() ) return (columns, self.meta) def __setstate__(self, state): columns, meta = state self.__init__(columns, meta=meta) @property def mask(self): # Dynamic view of available masks if self.masked or self.has_masked_columns or self.has_masked_values: mask_table = Table( [ getattr(col, "mask", FalseArray(col.shape)) for col in self.itercols() ], names=self.colnames, copy=False, ) # Set hidden attribute to force inplace setitem so that code like # t.mask['a'] = [1, 0, 1] will correctly set the underlying mask. # See #5556 for discussion. mask_table._setitem_inplace = True else: mask_table = None return mask_table @mask.setter def mask(self, val): self.mask[:] = val @property def _mask(self): """This is needed so that comparison of a masked Table and a MaskedArray works. The requirement comes from numpy.ma.core so don't remove this property. """ return self.as_array().mask def filled(self, fill_value=None): """Return copy of self, with masked values filled. If input ``fill_value`` supplied then that value is used for all masked entries in the table. Otherwise the individual ``fill_value`` defined for each table column is used. Parameters ---------- fill_value : str If supplied, this ``fill_value`` is used for all masked entries in the entire table. Returns ------- filled_table : `~astropy.table.Table` New table with masked values filled """ if self.masked or self.has_masked_columns or self.has_masked_values: # Get new columns with masked values filled, then create Table with those # new cols (copy=False) but deepcopy the meta. data = [ col.filled(fill_value) if hasattr(col, "filled") else col for col in self.itercols() ] return self.__class__(data, meta=deepcopy(self.meta), copy=False) else: # Return copy of the original object. return self.copy() @property def indices(self): """ Return the indices associated with columns of the table as a TableIndices object. """ lst = [] for column in self.columns.values(): for index in column.info.indices: if sum(index is x for x in lst) == 0: # ensure uniqueness lst.append(index) return TableIndices(lst) @property def loc(self): """ Return a TableLoc object that can be used for retrieving rows by index in a given data range. Note that both loc and iloc work only with single-column indices. """ return TableLoc(self) @property def loc_indices(self): """ Return a TableLocIndices object that can be used for retrieving the row indices corresponding to given table index key value or values. 
""" return TableLocIndices(self) @property def iloc(self): """ Return a TableILoc object that can be used for retrieving indexed rows in the order they appear in the index. """ return TableILoc(self) def add_index(self, colnames, engine=None, unique=False): """ Insert a new index among one or more columns. If there are no indices, make this index the primary table index. Parameters ---------- colnames : str or list List of column names (or a single column name) to index engine : type or None Indexing engine class to use, either `~astropy.table.SortedArray`, `~astropy.table.BST`, or `~astropy.table.SCEngine`. If the supplied argument is None (by default), use `~astropy.table.SortedArray`. unique : bool Whether the values of the index must be unique. Default is False. """ if isinstance(colnames, str): colnames = (colnames,) columns = self.columns[tuple(colnames)].values() # make sure all columns support indexing for col in columns: if not getattr(col.info, "_supports_indexing", False): raise ValueError( 'Cannot create an index on column "{}", of type "{}"'.format( col.info.name, type(col) ) ) is_primary = not self.indices index = Index(columns, engine=engine, unique=unique) sliced_index = SlicedIndex(index, slice(0, 0, None), original=True) if is_primary: self.primary_key = colnames for col in columns: col.info.indices.append(sliced_index) def remove_indices(self, colname): """ Remove all indices involving the given column. If the primary index is removed, the new primary index will be the most recently added remaining index. Parameters ---------- colname : str Name of column """ col = self.columns[colname] for index in self.indices: try: index.col_position(col.info.name) except ValueError: pass else: for c in index.columns: c.info.indices.remove(index) def index_mode(self, mode): """ Return a context manager for an indexing mode. Parameters ---------- mode : str Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'. In 'discard_on_copy' mode, indices are not copied whenever columns or tables are copied. In 'freeze' mode, indices are not modified whenever columns are modified; at the exit of the context, indices refresh themselves based on column values. This mode is intended for scenarios in which one intends to make many additions or modifications in an indexed column. In 'copy_on_getitem' mode, indices are copied when taking column slices as well as table slices, so col[i0:i1] will preserve indices. """ return _IndexModeContext(self, mode) def __array__(self, dtype=None): """Support converting Table to np.array via np.array(table). Coercion to a different dtype via np.array(table, dtype) is not supported and will raise a ValueError. """ if dtype is not None: if np.dtype(dtype) != object: raise ValueError("Datatype coercion is not allowed") out = np.array(None, dtype=object) out[()] = self return out # This limitation is because of the following unexpected result that # should have made a table copy while changing the column names. # # >>> d = astropy.table.Table([[1,2],[3,4]]) # >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')]) # array([(0, 0), (0, 0)], # dtype=[('a', '<i8'), ('b', '<i8')]) out = self.as_array() return out.data if isinstance(out, np.ma.MaskedArray) else out def _check_names_dtype(self, names, dtype, n_cols): """Make sure that names and dtype are both iterable and have the same length as data. 
""" for inp_list, inp_str in ((dtype, "dtype"), (names, "names")): if not isiterable(inp_list): raise ValueError(f"{inp_str} must be a list or None") if len(names) != n_cols or len(dtype) != n_cols: raise ValueError( 'Arguments "names" and "dtype" must match number of columns' ) def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy): """Initialize table from a list of dictionaries representing rows.""" # Define placeholder for missing values as a unique object that cannot # every occur in user data. MISSING = object() # Gather column names that exist in the input `data`. names_from_data = set() for row in data: names_from_data.update(row) if set(data[0].keys()) == names_from_data: names_from_data = list(data[0].keys()) else: names_from_data = sorted(names_from_data) # Note: if set(data[0].keys()) != names_from_data, this will give an # exception later, so NO need to catch here. # Convert list of dict into dict of list (cols), keep track of missing # indexes and put in MISSING placeholders in the `cols` lists. cols = {} missing_indexes = defaultdict(list) for name in names_from_data: cols[name] = [] for ii, row in enumerate(data): try: val = row[name] except KeyError: missing_indexes[name].append(ii) val = MISSING cols[name].append(val) # Fill the missing entries with first values if missing_indexes: for name, indexes in missing_indexes.items(): col = cols[name] first_val = next(val for val in col if val is not MISSING) for index in indexes: col[index] = first_val # prepare initialization if all(name is None for name in names): names = names_from_data self._init_from_dict(cols, names, dtype, n_cols, copy) # Mask the missing values if necessary, converting columns to MaskedColumn # as needed. if missing_indexes: for name, indexes in missing_indexes.items(): col = self[name] # Ensure that any Column subclasses with MISSING values can support # setting masked values. As of astropy 4.0 the test condition below is # always True since _init_from_dict cannot result in mixin columns. if isinstance(col, Column) and not isinstance(col, MaskedColumn): self[name] = self.MaskedColumn(col, copy=False) # Finally do the masking in a mixin-safe way. self[name][indexes] = np.ma.masked def _init_from_list(self, data, names, dtype, n_cols, copy): """Initialize table from a list of column data. A column can be a Column object, np.ndarray, mixin, or any other iterable object. """ # Special case of initializing an empty table like `t = Table()`. No # action required at this point. if n_cols == 0: return cols = [] default_names = _auto_names(n_cols) for col, name, default_name, dtype in zip(data, names, default_names, dtype): col = self._convert_data_to_col(col, copy, default_name, dtype, name) cols.append(col) self._init_from_cols(cols) def _convert_data_to_col( self, data, copy=True, default_name=None, dtype=None, name=None ): """ Convert any allowed sequence data ``col`` to a column object that can be used directly in the self.columns dict. This could be a Column, MaskedColumn, or mixin column. The final column name is determined by:: name or data.info.name or def_name If ``data`` has no ``info`` then ``name = name or def_name``. 
The behavior of ``copy`` for Column objects is: - copy=True: new class instance with a copy of data and deep copy of meta - copy=False: new class instance with same data and a key-only copy of meta For mixin columns: - copy=True: new class instance with copy of data and deep copy of meta - copy=False: original instance (no copy at all) Parameters ---------- data : object (column-like sequence) Input column data copy : bool Make a copy default_name : str Default name dtype : np.dtype or None Data dtype name : str or None Column name Returns ------- col : Column, MaskedColumn, mixin-column type Object that can be used as a column in self """ data_is_mixin = self._is_mixin_for_table(data) masked_col_cls = ( self.ColumnClass if issubclass(self.ColumnClass, self.MaskedColumn) else self.MaskedColumn ) try: data0_is_mixin = self._is_mixin_for_table(data[0]) except Exception: # Need broad exception, cannot predict what data[0] raises for arbitrary data data0_is_mixin = False # If the data is not an instance of Column or a mixin class, we can # check the registry of mixin 'handlers' to see if the column can be # converted to a mixin class if (handler := get_mixin_handler(data)) is not None: original_data = data data = handler(data) if not (data_is_mixin := self._is_mixin_for_table(data)): fully_qualified_name = ( original_data.__class__.__module__ + "." + original_data.__class__.__name__ ) raise TypeError( "Mixin handler for object of type " f"{fully_qualified_name} " "did not return a valid mixin column" ) # Get the final column name using precedence. Some objects may not # have an info attribute. Also avoid creating info as a side effect. if not name: if isinstance(data, Column): name = data.name or default_name elif "info" in getattr(data, "__dict__", ()): name = data.info.name or default_name else: name = default_name if isinstance(data, Column): # If self.ColumnClass is a subclass of col, then "upgrade" to ColumnClass, # otherwise just use the original class. The most common case is a # table with masked=True and ColumnClass=MaskedColumn. Then a Column # gets upgraded to MaskedColumn, but the converse (pre-4.0) behavior # of downgrading from MaskedColumn to Column (for non-masked table) # does not happen. col_cls = self._get_col_cls_for_table(data) elif data_is_mixin: # Copy the mixin column attributes if they exist since the copy below # may not get this attribute. If not copying, take a slice # to ensure we get a new instance and we do not share metadata # like info. col = col_copy(data, copy_indices=self._init_indices) if copy else data[:] col.info.name = name return col elif data0_is_mixin: # Handle case of a sequence of a mixin, e.g. [1*u.m, 2*u.m]. try: col = data[0].__class__(data) col.info.name = name return col except Exception: # If that didn't work for some reason, just turn it into np.array of object data = np.array(data, dtype=object) col_cls = self.ColumnClass elif isinstance(data, (np.ma.MaskedArray, Masked)): # Require that col_cls be a subclass of MaskedColumn, remembering # that ColumnClass could be a user-defined subclass (though more-likely # could be MaskedColumn). col_cls = masked_col_cls elif data is None: # Special case for data passed as the None object (for broadcasting # to an object column). Need to turn data into numpy `None` scalar # object, otherwise `Column` interprets data=None as no data instead # of a object column of `None`. 
data = np.array(None) col_cls = self.ColumnClass elif not hasattr(data, "dtype"): # `data` is none of the above, convert to numpy array or MaskedArray # assuming only that it is a scalar or sequence or N-d nested # sequence. This function is relatively intricate and tries to # maintain performance for common cases while handling things like # list input with embedded np.ma.masked entries. If `data` is a # scalar then it gets returned unchanged so the original object gets # passed to `Column` later. data = _convert_sequence_data_to_array(data, dtype) copy = False # Already made a copy above col_cls = ( masked_col_cls if isinstance(data, np.ma.MaskedArray) else self.ColumnClass ) else: col_cls = self.ColumnClass try: col = col_cls( name=name, data=data, dtype=dtype, copy=copy, copy_indices=self._init_indices, ) except Exception: # Broad exception class since we don't know what might go wrong raise ValueError("unable to convert data to Column for Table") col = self._convert_col_for_table(col) return col def _init_from_ndarray(self, data, names, dtype, n_cols, copy): """Initialize table from an ndarray structured array.""" data_names = data.dtype.names or _auto_names(n_cols) struct = data.dtype.names is not None names = [name or data_names[i] for i, name in enumerate(names)] cols = ( [data[name] for name in data_names] if struct else [data[:, i] for i in range(n_cols)] ) self._init_from_list(cols, names, dtype, n_cols, copy) def _init_from_dict(self, data, names, dtype, n_cols, copy): """Initialize table from a dictionary of columns.""" data_list = [data[name] for name in names] self._init_from_list(data_list, names, dtype, n_cols, copy) def _get_col_cls_for_table(self, col): """Get the correct column class to use for upgrading any Column-like object. For a masked table, ensure any Column-like object is a subclass of the table MaskedColumn. For unmasked table, ensure any MaskedColumn-like object is a subclass of the table MaskedColumn. If not a MaskedColumn, then ensure that any Column-like object is a subclass of the table Column. """ col_cls = col.__class__ if self.masked: if isinstance(col, Column) and not isinstance(col, self.MaskedColumn): col_cls = self.MaskedColumn else: if isinstance(col, MaskedColumn): if not isinstance(col, self.MaskedColumn): col_cls = self.MaskedColumn elif isinstance(col, Column) and not isinstance(col, self.Column): col_cls = self.Column return col_cls def _convert_col_for_table(self, col): """ Make sure that all Column objects have correct base class for this type of Table. For a base Table this most commonly means setting to MaskedColumn if the table is masked. Table subclasses like QTable override this method. """ if isinstance(col, Column) and not isinstance(col, self.ColumnClass): col_cls = self._get_col_cls_for_table(col) if col_cls is not col.__class__: col = col_cls(col, copy=False) return col def _init_from_cols(self, cols): """Initialize table from a list of Column or mixin objects.""" lengths = {len(col) for col in cols} if len(lengths) > 1: raise ValueError(f"Inconsistent data column lengths: {lengths}") # Make sure that all Column-based objects have correct class. For # plain Table this is self.ColumnClass, but for instance QTable will # convert columns with units to a Quantity mixin. newcols = [self._convert_col_for_table(col) for col in cols] self._make_table_from_cols(self, newcols) # Deduplicate indices. 
It may happen that after pickling or when # initing from an existing table that column indices which had been # references to a single index object got *copied* into an independent # object. This results in duplicates which will cause downstream problems. index_dict = {} for col in self.itercols(): for i, index in enumerate(col.info.indices or []): names = tuple(ind_col.info.name for ind_col in index.columns) if names in index_dict: col.info.indices[i] = index_dict[names] else: index_dict[names] = index def _new_from_slice(self, slice_): """Create a new table as a referenced slice from self.""" table = self.__class__(masked=self.masked) if self.meta: table.meta = self.meta.copy() # Shallow copy for slice table.primary_key = self.primary_key newcols = [] for col in self.columns.values(): newcol = col[slice_] # Note in line below, use direct attribute access to col.indices for Column # instances instead of the generic col.info.indices. This saves about 4 usec # per column. if (col if isinstance(col, Column) else col.info).indices: # TODO : as far as I can tell the only purpose of setting _copy_indices # here is to communicate that to the initial test in `slice_indices`. # Why isn't that just sent as an arg to the function? col.info._copy_indices = self._copy_indices newcol = col.info.slice_indices(newcol, slice_, len(col)) # Don't understand why this is forcing a value on the original column. # Normally col.info does not even have a _copy_indices attribute. Tests # still pass if this line is deleted. (Each col.info attribute access # is expensive). col.info._copy_indices = True newcols.append(newcol) self._make_table_from_cols( table, newcols, verify=False, names=self.columns.keys() ) return table @staticmethod def _make_table_from_cols(table, cols, verify=True, names=None): """ Make ``table`` in-place so that it represents the given list of ``cols``. """ if names is None: names = [col.info.name for col in cols] # Note: we do not test for len(names) == len(cols) if names is not None. In that # case the function is being called by from "trusted" source (e.g. right above here) # that is assumed to provide valid inputs. In that case verify=False. if verify: if None in names: raise TypeError("Cannot have None for column name") if len(set(names)) != len(names): raise ValueError("Duplicate column names") table.columns = table.TableColumns( (name, col) for name, col in zip(names, cols) ) for col in cols: table._set_col_parent_table_and_mask(col) def _set_col_parent_table_and_mask(self, col): """ Set ``col.parent_table = self`` and force ``col`` to have ``mask`` attribute if the table is masked and ``col.mask`` does not exist. """ # For Column instances it is much faster to do direct attribute access # instead of going through .info col_info = col if isinstance(col, Column) else col.info col_info.parent_table = self # Legacy behavior for masked table if self.masked and not hasattr(col, "mask"): col.mask = FalseArray(col.shape) def itercols(self): """ Iterate over the columns of this table. Examples -------- To iterate over the columns of a table:: >>> t = Table([[1], [2]]) >>> for col in t.itercols(): ... print(col) col0 ---- 1 col1 ---- 2 Using ``itercols()`` is similar to ``for col in t.columns.values()`` but is syntactically preferred. 
""" for colname in self.columns: yield self[colname] def _base_repr_( self, html=False, descr_vals=None, max_width=None, tableid=None, show_dtype=True, max_lines=None, tableclass=None, ): if descr_vals is None: descr_vals = [self.__class__.__name__] if self.masked: descr_vals.append("masked=True") descr_vals.append(f"length={len(self)}") descr = " ".join(descr_vals) if html: from astropy.utils.xml.writer import xml_escape descr = f"<i>{xml_escape(descr)}</i>\n" else: descr = f"<{descr}>\n" if tableid is None: tableid = f"table{id(self)}" data_lines, outs = self.formatter._pformat_table( self, tableid=tableid, html=html, max_width=max_width, show_name=True, show_unit=None, show_dtype=show_dtype, max_lines=max_lines, tableclass=tableclass, ) out = descr + "\n".join(data_lines) return out def _repr_html_(self): out = self._base_repr_( html=True, max_width=-1, tableclass=conf.default_notebook_table_class ) # Wrap <table> in <div>. This follows the pattern in pandas and allows # table to be scrollable horizontally in VS Code notebook display. out = f"<div>{out}</div>" return out def __repr__(self): return self._base_repr_(html=False, max_width=None) def __str__(self): return "\n".join(self.pformat()) def __bytes__(self): return str(self).encode("utf-8") @property def has_mixin_columns(self): """ True if table has any mixin columns (defined as columns that are not Column subclasses). """ return any(has_info_class(col, MixinInfo) for col in self.columns.values()) @property def has_masked_columns(self): """True if table has any ``MaskedColumn`` columns. This does not check for mixin columns that may have masked values, use the ``has_masked_values`` property in that case. """ return any(isinstance(col, MaskedColumn) for col in self.itercols()) @property def has_masked_values(self): """True if column in the table has values which are masked. This may be relatively slow for large tables as it requires checking the mask values of each column. """ return any(hasattr(col, "mask") and np.any(col.mask) for col in self.itercols()) def _is_mixin_for_table(self, col): """ Determine if ``col`` should be added to the table directly as a mixin column. """ if isinstance(col, BaseColumn): return False # Is it a mixin but not [Masked]Quantity (which gets converted to # [Masked]Column with unit set). return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo) @format_doc(_pprint_docs) def pprint( self, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, align=None, ): """Print a formatted string representation of the table. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default is taken from the configuration item ``astropy.conf.max_lines``. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for max_width except the configuration item is ``astropy.conf.max_width``. 
""" lines, outs = self.formatter._pformat_table( self, max_lines, max_width, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, align=align, ) if outs["show_length"]: lines.append(f"Length = {len(self)} rows") n_header = outs["n_header"] for i, line in enumerate(lines): if i < n_header: color_print(line, "red") else: print(line) @format_doc(_pprint_docs) def pprint_all( self, max_lines=-1, max_width=-1, show_name=True, show_unit=None, show_dtype=False, align=None, ): """Print a formatted string representation of the entire table. This method is the same as `astropy.table.Table.pprint` except that the default ``max_lines`` and ``max_width`` are both -1 so that by default the entire table is printed instead of restricting to the size of the screen terminal. """ return self.pprint( max_lines, max_width, show_name, show_unit, show_dtype, align ) def _make_index_row_display_table(self, index_row_name): if index_row_name not in self.columns: idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self))) return self.__class__([idx_col] + list(self.columns.values()), copy=False) else: return self def show_in_notebook( self, tableid=None, css=None, display_length=50, table_class="astropy-default", show_row_index="idx", ): """Render the table in HTML and show it in the IPython notebook. Parameters ---------- tableid : str or None An html ID tag for the table. Default is ``table{id}-XXX``, where id is the unique integer id of the table object, id(self), and XXX is a random number to avoid conflicts when printing the same table multiple times. table_class : str or None A string with a list of HTML classes used to style the table. The special default string ('astropy-default') means that the string will be retrieved from the configuration item ``astropy.table.default_notebook_table_class``. Note that these table classes may make use of bootstrap, as this is loaded with the notebook. See `this page <https://getbootstrap.com/css/#tables>`_ for the list of classes. css : str A valid CSS string declaring the formatting for the table. Defaults to ``astropy.table.jsviewer.DEFAULT_CSS_NB``. display_length : int, optional Number or rows to show. Defaults to 50. show_row_index : str or False If this does not evaluate to False, a column with the given name will be added to the version of the table that gets displayed. This new column shows the index of the row in the table itself, even when the displayed table is re-sorted by another column. Note that if a column with this name already exists, this option will be ignored. Defaults to "idx". Notes ----- Currently, unlike `show_in_browser` (with ``jsviewer=True``), this method needs to access online javascript code repositories. This is due to modern browsers' limitations on accessing local files. Hence, if you call this method while offline (and don't have a cached version of jquery and jquery.dataTables), you will not get the jsviewer features. 
""" from IPython.display import HTML from .jsviewer import JSViewer if tableid is None: tableid = f"table{id(self)}-{np.random.randint(1, 1e6)}" jsv = JSViewer(display_length=display_length) if show_row_index: display_table = self._make_index_row_display_table(show_row_index) else: display_table = self if table_class == "astropy-default": table_class = conf.default_notebook_table_class html = display_table._base_repr_( html=True, max_width=-1, tableid=tableid, max_lines=-1, show_dtype=False, tableclass=table_class, ) columns = display_table.columns.values() sortable_columns = [ i for i, col in enumerate(columns) if col.info.dtype.kind in "iufc" ] html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns) return HTML(html) def show_in_browser( self, max_lines=5000, jsviewer=False, browser="default", jskwargs={"use_local_files": True}, tableid=None, table_class="display compact", css=None, show_row_index="idx", ): """Render the table in HTML and show it in a web browser. Parameters ---------- max_lines : int Maximum number of rows to export to the table (set low by default to avoid memory issues, since the browser view requires duplicating the table in memory). A negative value of ``max_lines`` indicates no row limit. jsviewer : bool If `True`, prepends some javascript headers so that the table is rendered as a `DataTables <https://datatables.net>`_ data table. This allows in-browser searching & sorting. browser : str Any legal browser name, e.g. ``'firefox'``, ``'chrome'``, ``'safari'`` (for mac, you may need to use ``'open -a "/Applications/Google Chrome.app" {}'`` for Chrome). If ``'default'``, will use the system default browser. jskwargs : dict Passed to the `astropy.table.JSViewer` init. Defaults to ``{'use_local_files': True}`` which means that the JavaScript libraries will be served from local copies. tableid : str or None An html ID tag for the table. Default is ``table{id}``, where id is the unique integer id of the table object, id(self). table_class : str or None A string with a list of HTML classes used to style the table. Default is "display compact", and other possible values can be found in https://www.datatables.net/manual/styling/classes css : str A valid CSS string declaring the formatting for the table. Defaults to ``astropy.table.jsviewer.DEFAULT_CSS``. show_row_index : str or False If this does not evaluate to False, a column with the given name will be added to the version of the table that gets displayed. This new column shows the index of the row in the table itself, even when the displayed table is re-sorted by another column. Note that if a column with this name already exists, this option will be ignored. Defaults to "idx". """ import os import tempfile import webbrowser from urllib.parse import urljoin from urllib.request import pathname2url from .jsviewer import DEFAULT_CSS if css is None: css = DEFAULT_CSS # We can't use NamedTemporaryFile here because it gets deleted as # soon as it gets garbage collected. 
tmpdir = tempfile.mkdtemp() path = os.path.join(tmpdir, "table.html") with open(path, "w") as tmp: if jsviewer: if show_row_index: display_table = self._make_index_row_display_table(show_row_index) else: display_table = self display_table.write( tmp, format="jsviewer", css=css, max_lines=max_lines, jskwargs=jskwargs, table_id=tableid, table_class=table_class, ) else: self.write(tmp, format="html") try: br = webbrowser.get(None if browser == "default" else browser) except webbrowser.Error: log.error(f"Browser '{browser}' not found.") else: br.open(urljoin("file:", pathname2url(path))) @format_doc(_pformat_docs, id="{id}") def pformat( self, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, html=False, tableid=None, align=None, tableclass=None, ): """Return a list of lines for the formatted string representation of the table. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default is taken from the configuration item ``astropy.conf.max_lines``. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for ``max_width`` except the configuration item is ``astropy.conf.max_width``. """ lines, outs = self.formatter._pformat_table( self, max_lines, max_width, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, html=html, tableid=tableid, tableclass=tableclass, align=align, ) if outs["show_length"]: lines.append(f"Length = {len(self)} rows") return lines @format_doc(_pformat_docs, id="{id}") def pformat_all( self, max_lines=-1, max_width=-1, show_name=True, show_unit=None, show_dtype=False, html=False, tableid=None, align=None, tableclass=None, ): """Return a list of lines for the formatted string representation of the entire table. If no value of ``max_lines`` is supplied then the height of the screen terminal is used to set ``max_lines``. If the terminal height cannot be determined then the default is taken from the configuration item ``astropy.conf.max_lines``. If a negative value of ``max_lines`` is supplied then there is no line limit applied. The same applies for ``max_width`` except the configuration item is ``astropy.conf.max_width``. """ return self.pformat( max_lines, max_width, show_name, show_unit, show_dtype, html, tableid, align, tableclass, ) def more( self, max_lines=None, max_width=None, show_name=True, show_unit=None, show_dtype=False, ): """Interactively browse table with a paging interface. Supported keys:: f, <space> : forward one page b : back one page r : refresh same page n : next row p : previous row < : go to beginning > : go to end q : quit browsing h : print this help Parameters ---------- max_lines : int Maximum number of lines in table output max_width : int or None Maximum character width of output show_name : bool Include a header row for column names. Default is True. show_unit : bool Include a header row for unit. Default is to show a row for units only if one or more columns has a defined value for the unit. show_dtype : bool Include a header row for column dtypes. Default is False. 
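Examples
--------
An illustrative call, skipped under doctest because it launches a web
browser; the table contents are arbitrary:

>>> from astropy.table import Table
>>> t = Table({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
>>> t.show_in_browser(jsviewer=True)  # doctest: +SKIP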
""" self.formatter._more_tabcol( self, max_lines, max_width, show_name=show_name, show_unit=show_unit, show_dtype=show_dtype, ) def __getitem__(self, item): if isinstance(item, str): return self.columns[item] elif isinstance(item, (int, np.integer)): return self.Row(self, item) elif ( isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == "i" ): return self.Row(self, item.item()) elif self._is_list_or_tuple_of_str(item): out = self.__class__( [self[x] for x in item], copy_indices=self._copy_indices ) out._groups = groups.TableGroups( out, indices=self.groups._indices, keys=self.groups._keys ) out.meta = self.meta.copy() # Shallow copy for meta return out elif (isinstance(item, np.ndarray) and item.size == 0) or ( isinstance(item, (tuple, list)) and not item ): # If item is an empty array/list/tuple then return the table with no rows return self._new_from_slice([]) elif ( isinstance(item, (slice, np.ndarray, list)) or isinstance(item, tuple) and all(isinstance(x, np.ndarray) for x in item) ): # here for the many ways to give a slice; a tuple of ndarray # is produced by np.where, as in t[np.where(t['a'] > 2)] # For all, a new table is constructed with slice of all columns return self._new_from_slice(item) else: raise ValueError(f"Illegal type {type(item)} for table item access") def __setitem__(self, item, value): # If the item is a string then it must be the name of a column. # If that column doesn't already exist then create it now. if isinstance(item, str) and item not in self.colnames: self.add_column(value, name=item, copy=True) else: n_cols = len(self.columns) if isinstance(item, str): # Set an existing column by first trying to replace, and if # this fails do an in-place update. See definition of mask # property for discussion of the _setitem_inplace attribute. 
if ( not getattr(self, "_setitem_inplace", False) and not conf.replace_inplace ): try: self._replace_column_warnings(item, value) return except Exception: pass self.columns[item][:] = value elif isinstance(item, (int, np.integer)): self._set_row(idx=item, colnames=self.colnames, vals=value) elif ( isinstance(item, (slice, np.ndarray, list)) or isinstance(item, tuple) and all(isinstance(x, np.ndarray) for x in item) ): if isinstance(value, Table): vals = (col for col in value.columns.values()) elif isinstance(value, np.ndarray) and value.dtype.names: vals = (value[name] for name in value.dtype.names) elif np.isscalar(value): vals = itertools.repeat(value, n_cols) else: # Assume this is an iterable that will work if len(value) != n_cols: raise ValueError( "Right side value needs {} elements (one for each column)".format( n_cols ) ) vals = value for col, val in zip(self.columns.values(), vals): col[item] = val else: raise ValueError(f"Illegal type {type(item)} for table item access") def __delitem__(self, item): if isinstance(item, str): self.remove_column(item) elif isinstance(item, (int, np.integer)): self.remove_row(item) elif isinstance(item, (list, tuple, np.ndarray)) and all( isinstance(x, str) for x in item ): self.remove_columns(item) elif ( isinstance(item, (list, np.ndarray)) and np.asarray(item).dtype.kind == "i" ): self.remove_rows(item) elif isinstance(item, slice): self.remove_rows(item) else: raise IndexError("illegal key or index value") def _ipython_key_completions_(self): return self.colnames def field(self, item): """Return column[item] for recarray compatibility.""" return self.columns[item] @property def masked(self): return self._masked @masked.setter def masked(self, masked): raise Exception( "Masked attribute is read-only (use t = Table(t, masked=True)" " to convert to a masked table)" ) def _set_masked(self, masked): """ Set the table masked property. Parameters ---------- masked : bool State of table masking (`True` or `False`) """ if masked in [True, False, None]: self._masked = masked else: raise ValueError("masked should be one of True, False, None") self._column_class = self.MaskedColumn if self._masked else self.Column @property def ColumnClass(self): if self._column_class is None: return self.Column else: return self._column_class @property def dtype(self): return np.dtype([descr(col) for col in self.columns.values()]) @property def colnames(self): return list(self.columns.keys()) @staticmethod def _is_list_or_tuple_of_str(names): """Check that ``names`` is a tuple or list of strings.""" return ( isinstance(names, (tuple, list)) and names and all(isinstance(x, str) for x in names) ) def keys(self): return list(self.columns.keys()) def values(self): return self.columns.values() def items(self): return self.columns.items() def __len__(self): # For performance reasons (esp. in Row) cache the first column name # and use that subsequently for the table length. If might not be # available yet or the column might be gone now, in which case # try again in the except block. 
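        # Descriptive note: the fast path in the ``try`` below is roughly
        # ``len(self.columns[self._first_colname])``, but it calls
        # ``OrderedDict.__getitem__`` directly to skip the extra dispatch done
        # by ``TableColumns.__getitem__``; the ``except`` block repairs a
        # missing or stale cached name.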
try: return len(OrderedDict.__getitem__(self.columns, self._first_colname)) except (AttributeError, KeyError): if len(self.columns) == 0: return 0 # Get the first column name self._first_colname = next(iter(self.columns)) return len(self.columns[self._first_colname]) def __or__(self, other): if isinstance(other, Table): updated_table = self.copy() updated_table.update(other) return updated_table else: return NotImplemented def __ior__(self, other): try: self.update(other) return self except TypeError: return NotImplemented def index_column(self, name): """ Return the positional index of column ``name``. Parameters ---------- name : str column name Returns ------- index : int Positional index of column ``name``. Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Get index of column 'b' of the table:: >>> t.index_column('b') 1 """ try: return self.colnames.index(name) except ValueError: raise ValueError(f"Column {name} does not exist") def add_column( self, col, index=None, name=None, rename_duplicate=False, copy=True, default_name=None, ): """ Add a new column to the table using ``col`` as input. If ``index`` is supplied then insert column before ``index`` position in the list of columns, otherwise append column to the end of the list. The ``col`` input can be any data object which is acceptable as a `~astropy.table.Table` column object or can be converted. This includes mixin columns and scalar or length=1 objects which get broadcast to match the table length. To add several columns at once use ``add_columns()`` or simply call ``add_column()`` for each one. There is very little performance difference in the two approaches. Parameters ---------- col : object Data object for the new column index : int or None Insert column before this position or at end (default). name : str Column name rename_duplicate : bool Uniquify column name if it already exist. Default is False. copy : bool Make a copy of the new column. Default is True. default_name : str or None Name to use if both ``name`` and ``col.info.name`` are not available. Defaults to ``col{number_of_columns}``. Examples -------- Create a table with two columns 'a' and 'b', then create a third column 'c' and append it to the end of the table:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> col_c = Column(name='c', data=['x', 'y']) >>> t.add_column(col_c) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y Add column 'd' at position 1. Note that the column is inserted before the given index:: >>> t.add_column(['a', 'b'], name='d', index=1) >>> print(t) a d b c --- --- --- --- 1 a 0.1 x 2 b 0.2 y Add second column named 'b' with rename_duplicate:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_column(1.1, name='b', rename_duplicate=True) >>> print(t) a b b_1 --- --- --- 1 0.1 1.1 2 0.2 1.1 Add an unnamed column or mixin object in the table using a default name or by specifying an explicit name with ``name``. Name can also be overridden:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_column(['a', 'b']) >>> t.add_column(col_c, name='d') >>> print(t) a b col2 d --- --- ---- --- 1 0.1 a x 2 0.2 b y """ if default_name is None: default_name = f"col{len(self.columns)}" # Convert col data to acceptable object for insertion into self.columns. 
# Note that along with the lines above and below, this allows broadcasting # of scalars to the correct shape for adding to table. col = self._convert_data_to_col( col, name=name, copy=copy, default_name=default_name ) # Assigning a scalar column to an empty table should result in an # exception (see #3811). if col.shape == () and len(self) == 0: raise TypeError("Empty table cannot have column set to scalar value") # Make col data shape correct for scalars. The second test is to allow # broadcasting an N-d element to a column, e.g. t['new'] = [[1, 2]]. elif (col.shape == () or col.shape[0] == 1) and len(self) > 0: new_shape = (len(self),) + getattr(col, "shape", ())[1:] if isinstance(col, np.ndarray): col = np.broadcast_to(col, shape=new_shape, subok=True) elif isinstance(col, ShapedLikeNDArray): col = col._apply(np.broadcast_to, shape=new_shape, subok=True) # broadcast_to() results in a read-only array. Apparently it only changes # the view to look like the broadcasted array. So copy. col = col_copy(col) name = col.info.name # Ensure that new column is the right length if len(self.columns) > 0 and len(col) != len(self): raise ValueError("Inconsistent data column lengths") if rename_duplicate: orig_name = name i = 1 while name in self.columns: # Iterate until a unique name is found name = orig_name + "_" + str(i) i += 1 col.info.name = name # Set col parent_table weakref and ensure col has mask attribute if table.masked self._set_col_parent_table_and_mask(col) # Add new column as last column self.columns[name] = col if index is not None: # Move the other cols to the right of the new one move_names = self.colnames[index:-1] for move_name in move_names: self.columns.move_to_end(move_name, last=True) def add_columns( self, cols, indexes=None, names=None, copy=True, rename_duplicate=False ): """ Add a list of new columns the table using ``cols`` data objects. If a corresponding list of ``indexes`` is supplied then insert column before each ``index`` position in the *original* list of columns, otherwise append columns to the end of the list. The ``cols`` input can include any data objects which are acceptable as `~astropy.table.Table` column objects or can be converted. This includes mixin columns and scalar or length=1 objects which get broadcast to match the table length. From a performance perspective there is little difference between calling this method once or looping over the new columns and calling ``add_column()`` for each column. Parameters ---------- cols : list of object List of data objects for the new columns indexes : list of int or None Insert column before this position or at end (default). names : list of str Column names copy : bool Make a copy of the new columns. Default is True. rename_duplicate : bool Uniquify new column names if they duplicate the existing ones. Default is False. See Also -------- astropy.table.hstack, update, replace_column Examples -------- Create a table with two columns 'a' and 'b', then create columns 'c' and 'd' and append them to the end of the table:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> col_c = Column(name='c', data=['x', 'y']) >>> col_d = Column(name='d', data=['u', 'v']) >>> t.add_columns([col_c, col_d]) >>> print(t) a b c d --- --- --- --- 1 0.1 x u 2 0.2 y v Add column 'c' at position 0 and column 'd' at position 1. Note that the columns are inserted before the given position:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_columns([['x', 'y'], ['u', 'v']], names=['c', 'd'], ... 
indexes=[0, 1]) >>> print(t) c a d b --- --- --- --- x 1 u 0.1 y 2 v 0.2 Add second column 'b' and column 'c' with ``rename_duplicate``:: >>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b')) >>> t.add_columns([[1.1, 1.2], ['x', 'y']], names=('b', 'c'), ... rename_duplicate=True) >>> print(t) a b b_1 c --- --- --- --- 1 0.1 1.1 x 2 0.2 1.2 y Add unnamed columns or mixin objects in the table using default names or by specifying explicit names with ``names``. Names can also be overridden:: >>> t = Table() >>> col_b = Column(name='b', data=['u', 'v']) >>> t.add_columns([[1, 2], col_b]) >>> t.add_columns([[3, 4], col_b], names=['c', 'd']) >>> print(t) col0 b c d ---- --- --- --- 1 u 3 u 2 v 4 v """ if indexes is None: indexes = [len(self.columns)] * len(cols) elif len(indexes) != len(cols): raise ValueError("Number of indexes must match number of cols") if names is None: names = (None,) * len(cols) elif len(names) != len(cols): raise ValueError("Number of names must match number of cols") default_names = [f"col{ii + len(self.columns)}" for ii in range(len(cols))] for ii in reversed(np.argsort(indexes, kind="stable")): self.add_column( cols[ii], index=indexes[ii], name=names[ii], default_name=default_names[ii], rename_duplicate=rename_duplicate, copy=copy, ) def _replace_column_warnings(self, name, col): """ Same as replace_column but issues warnings under various circumstances. """ warns = conf.replace_warnings refcount = None old_col = None # sys.getrefcount is CPython specific and not on PyPy. if ( "refcount" in warns and name in self.colnames and hasattr(sys, "getrefcount") ): refcount = sys.getrefcount(self[name]) if name in self.colnames: old_col = self[name] # This may raise an exception (e.g. t['a'] = 1) in which case none of # the downstream code runs. self.replace_column(name, col) if "always" in warns: warnings.warn( f"replaced column '{name}'", TableReplaceWarning, stacklevel=3 ) if "slice" in warns: try: # Check for ndarray-subclass slice. An unsliced instance # has an ndarray for the base while sliced has the same class # as parent. if isinstance(old_col.base, old_col.__class__): msg = ( "replaced column '{}' which looks like an array slice. " "The new column no longer shares memory with the " "original array.".format(name) ) warnings.warn(msg, TableReplaceWarning, stacklevel=3) except AttributeError: pass # sys.getrefcount is CPython specific and not on PyPy. if "refcount" in warns and hasattr(sys, "getrefcount"): # Did reference count change? new_refcount = sys.getrefcount(self[name]) if refcount != new_refcount: msg = ( "replaced column '{}' and the number of references " "to the column changed.".format(name) ) warnings.warn(msg, TableReplaceWarning, stacklevel=3) if "attributes" in warns: # Any of the standard column attributes changed? changed_attrs = [] new_col = self[name] # Check base DataInfo attributes that any column will have for attr in DataInfo.attr_names: if getattr(old_col.info, attr) != getattr(new_col.info, attr): changed_attrs.append(attr) if changed_attrs: msg = "replaced column '{}' and column attributes {} changed.".format( name, changed_attrs ) warnings.warn(msg, TableReplaceWarning, stacklevel=3) def replace_column(self, name, col, copy=True): """ Replace column ``name`` with the new ``col`` object. 
The behavior of ``copy`` for Column objects is: - copy=True: new class instance with a copy of data and deep copy of meta - copy=False: new class instance with same data and a key-only copy of meta For mixin columns: - copy=True: new class instance with copy of data and deep copy of meta - copy=False: original instance (no copy at all) Parameters ---------- name : str Name of column to replace col : `~astropy.table.Column` or `~numpy.ndarray` or sequence New column object to replace the existing column. copy : bool Make copy of the input ``col``, default=True See Also -------- add_columns, astropy.table.hstack, update Examples -------- Replace column 'a' with a float version of itself:: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b')) >>> float_a = t['a'].astype(float) >>> t.replace_column('a', float_a) """ if name not in self.colnames: raise ValueError(f"column name {name} is not in the table") if self[name].info.indices: raise ValueError("cannot replace a table index column") col = self._convert_data_to_col(col, name=name, copy=copy) self._set_col_parent_table_and_mask(col) # Ensure that new column is the right length, unless it is the only column # in which case re-sizing is allowed. if len(self.columns) > 1 and len(col) != len(self[name]): raise ValueError("length of new column must match table length") self.columns.__setitem__(name, col, validated=True) def remove_row(self, index): """ Remove a row from the table. Parameters ---------- index : int Index of row to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove row 1 from the table:: >>> t.remove_row(1) >>> print(t) a b c --- --- --- 1 0.1 x 3 0.3 z To remove several rows at the same time use remove_rows. """ # check the index against the types that work with np.delete if not isinstance(index, (int, np.integer)): raise TypeError("Row index must be an integer") self.remove_rows(index) def remove_rows(self, row_specifier): """ Remove rows from the table. Parameters ---------- row_specifier : slice or int or array of int Specification for rows to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove rows 0 and 2 from the table:: >>> t.remove_rows([0, 2]) >>> print(t) a b c --- --- --- 2 0.2 y Note that there are no warnings if the slice operator extends outside the data:: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> t.remove_rows(slice(10, 20, 1)) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z """ # Update indices for index in self.indices: index.remove_rows(row_specifier) keep_mask = np.ones(len(self), dtype=bool) keep_mask[row_specifier] = False columns = self.TableColumns() for name, col in self.columns.items(): newcol = col[keep_mask] newcol.info.parent_table = self columns[name] = newcol self._replace_cols(columns) # Revert groups to default (ungrouped) state if hasattr(self, "_groups"): del self._groups def iterrows(self, *names): """ Iterate over rows of table returning a tuple of values for each row. This method is especially useful when only a subset of columns are needed. The ``iterrows`` method can be substantially faster than using the standard Table row iteration (e.g. 
``for row in tbl:``), since that returns a new ``~astropy.table.Row`` object for each row and accessing a column in that row (e.g. ``row['col0']``) is slower than tuple access. Parameters ---------- names : list List of column names (default to all columns if no names provided) Returns ------- rows : iterable Iterator returns tuples of row values Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table({'a': [1, 2, 3], ... 'b': [1.0, 2.5, 3.0], ... 'c': ['x', 'y', 'z']}) To iterate row-wise using column names:: >>> for a, c in t.iterrows('a', 'c'): ... print(a, c) 1 x 2 y 3 z """ if len(names) == 0: names = self.colnames else: for name in names: if name not in self.colnames: raise ValueError(f"{name} is not a valid column name") cols = (self[name] for name in names) out = zip(*cols) return out def _set_of_names_in_colnames(self, names): """Return ``names`` as a set if valid, or raise a `KeyError`. ``names`` is valid if all elements in it are in ``self.colnames``. If ``names`` is a string then it is interpreted as a single column name. """ names = {names} if isinstance(names, str) else set(names) invalid_names = names.difference(self.colnames) if len(invalid_names) == 1: raise KeyError(f'column "{invalid_names.pop()}" does not exist') elif len(invalid_names) > 1: raise KeyError(f"columns {invalid_names} do not exist") return names def remove_column(self, name): """ Remove a column from the table. This can also be done with:: del table[name] Parameters ---------- name : str Name of column to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove column 'b' from the table:: >>> t.remove_column('b') >>> print(t) a c --- --- 1 x 2 y 3 z To remove several columns at the same time use remove_columns. """ self.remove_columns([name]) def remove_columns(self, names): """ Remove several columns from the table. Parameters ---------- names : str or iterable of str Names of the columns to remove Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Remove columns 'b' and 'c' from the table:: >>> t.remove_columns(['b', 'c']) >>> print(t) a --- 1 2 3 Specifying only a single column also works. Remove column 'b' from the table:: >>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> t.remove_columns('b') >>> print(t) a c --- --- 1 x 2 y 3 z This gives the same as using remove_column. """ for name in self._set_of_names_in_colnames(names): del self.columns[name] def _convert_string_dtype(self, in_kind, out_kind, encode_decode_func): """ Convert string-like columns to/from bytestring and unicode (internal only). Parameters ---------- in_kind : str Input dtype.kind out_kind : str Output dtype.kind """ for col in self.itercols(): if col.dtype.kind == in_kind: try: # This requires ASCII and is faster by a factor of up to ~8, so # try that first. newcol = col.__class__(col, dtype=out_kind) except (UnicodeEncodeError, UnicodeDecodeError): newcol = col.__class__(encode_decode_func(col, "utf-8")) # Quasi-manually copy info attributes. Unfortunately # DataInfo.__set__ does not do the right thing in this case # so newcol.info = col.info does not get the old info attributes. 
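                # Descriptive note: the loop below deep-copies the remaining
                # info attributes (e.g. name, unit, format, description, meta)
                # onto the new column, deliberately excluding ``dtype`` since
                # that is exactly what changed in the conversion.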
for attr in ( col.info.attr_names - col.info._attrs_no_copy - {"dtype"} ): value = deepcopy(getattr(col.info, attr)) setattr(newcol.info, attr, value) self[col.name] = newcol def convert_bytestring_to_unicode(self): """ Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') using UTF-8 encoding. Internally this changes string columns to represent each character in the string with a 4-byte UCS-4 equivalent, so it is inefficient for memory but allows scripts to manipulate string arrays with natural syntax. """ self._convert_string_dtype("S", "U", np.char.decode) def convert_unicode_to_bytestring(self): """ Convert unicode columns (dtype.kind='U') to bytestring (dtype.kind='S') using UTF-8 encoding. When exporting a unicode string array to a file, it may be desirable to encode unicode columns as bytestrings. """ self._convert_string_dtype("U", "S", np.char.encode) def keep_columns(self, names): """ Keep only the columns specified (remove the others). Parameters ---------- names : str or iterable of str The columns to keep. All other columns will be removed. Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> print(t) a b c --- --- --- 1 0.1 x 2 0.2 y 3 0.3 z Keep only column 'a' of the table:: >>> t.keep_columns('a') >>> print(t) a --- 1 2 3 Keep columns 'a' and 'c' of the table:: >>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']], ... names=('a', 'b', 'c')) >>> t.keep_columns(['a', 'c']) >>> print(t) a c --- --- 1 x 2 y 3 z """ names = self._set_of_names_in_colnames(names) for colname in self.colnames: if colname not in names: del self.columns[colname] def rename_column(self, name, new_name): """ Rename a column. This can also be done directly by setting the ``name`` attribute of the ``info`` property of the column:: table[name].info.name = new_name Parameters ---------- name : str The current name of the column. new_name : str The new name for the column Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c')) >>> print(t) a b c --- --- --- 1 3 5 2 4 6 Renaming column 'a' to 'aa':: >>> t.rename_column('a' , 'aa') >>> print(t) aa b c --- --- --- 1 3 5 2 4 6 """ if name not in self.keys(): raise KeyError(f"Column {name} does not exist") self.columns[name].info.name = new_name def rename_columns(self, names, new_names): """ Rename multiple columns. Parameters ---------- names : list, tuple A list or tuple of existing column names. new_names : list, tuple A list or tuple of new column names. 
Examples -------- Create a table with three columns 'a', 'b', 'c':: >>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c')) >>> print(t) a b c --- --- --- 1 3 5 2 4 6 Renaming columns 'a' to 'aa' and 'b' to 'bb':: >>> names = ('a','b') >>> new_names = ('aa','bb') >>> t.rename_columns(names, new_names) >>> print(t) aa bb c --- --- --- 1 3 5 2 4 6 """ if not self._is_list_or_tuple_of_str(names): raise TypeError("input 'names' must be a tuple or a list of column names") if not self._is_list_or_tuple_of_str(new_names): raise TypeError( "input 'new_names' must be a tuple or a list of column names" ) if len(names) != len(new_names): raise ValueError( "input 'names' and 'new_names' list arguments must be the same length" ) for name, new_name in zip(names, new_names): self.rename_column(name, new_name) def _set_row(self, idx, colnames, vals): try: assert len(vals) == len(colnames) except Exception: raise ValueError( "right hand side must be a sequence of values with " "the same length as the number of selected columns" ) # Keep track of original values before setting each column so that # setting row can be transactional. orig_vals = [] cols = self.columns try: for name, val in zip(colnames, vals): orig_vals.append(cols[name][idx]) cols[name][idx] = val except Exception: # If anything went wrong first revert the row update then raise for name, val in zip(colnames, orig_vals[:-1]): cols[name][idx] = val raise def add_row(self, vals=None, mask=None): """Add a new row to the end of the table. The ``vals`` argument can be: sequence (e.g. tuple or list) Column values in the same order as table columns. mapping (e.g. dict) Keys corresponding to column names. Missing values will be filled with np.zeros for the column dtype. `None` All values filled with np.zeros for the column dtype. This method requires that the Table object "owns" the underlying array data. In particular one cannot add a row to a Table that was initialized with copy=False from an existing array. The ``mask`` attribute should give (if desired) the mask for the values. The type of the mask should match that of the values, i.e. if ``vals`` is an iterable, then ``mask`` should also be an iterable with the same length, and if ``vals`` is a mapping, then ``mask`` should be a dictionary. Parameters ---------- vals : tuple, list, dict or None Use the specified values in the new row mask : tuple, list, dict or None Use the specified mask values in the new row Examples -------- Create a table with three columns 'a', 'b' and 'c':: >>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c')) >>> print(t) a b c --- --- --- 1 4 7 2 5 8 Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c':: >>> t.add_row([3,6,9]) >>> print(t) a b c --- --- --- 1 4 7 2 5 8 3 6 9 """ self.insert_row(len(self), vals, mask) def insert_row(self, index, vals=None, mask=None): """Add a new row before the given ``index`` position in the table. The ``vals`` argument can be: sequence (e.g. tuple or list) Column values in the same order as table columns. mapping (e.g. dict) Keys corresponding to column names. Missing values will be filled with np.zeros for the column dtype. `None` All values filled with np.zeros for the column dtype. The ``mask`` attribute should give (if desired) the mask for the values. The type of the mask should match that of the values, i.e. if ``vals`` is an iterable, then ``mask`` should also be an iterable with the same length, and if ``vals`` is a mapping, then ``mask`` should be a dictionary. 
Parameters ---------- vals : tuple, list, dict or None Use the specified values in the new row mask : tuple, list, dict or None Use the specified mask values in the new row """ colnames = self.colnames N = len(self) if index < -N or index > N: raise IndexError( f"Index {index} is out of bounds for table with length {N}" ) if index < 0: index += N if isinstance(vals, Mapping) or vals is None: # From the vals and/or mask mappings create the corresponding lists # that have entries for each table column. if mask is not None and not isinstance(mask, Mapping): raise TypeError("Mismatch between type of vals and mask") # Now check that the mask is specified for the same keys as the # values, otherwise things get really confusing. if mask is not None and set(vals.keys()) != set(mask.keys()): raise ValueError("keys in mask should match keys in vals") if vals and any(name not in colnames for name in vals): raise ValueError("Keys in vals must all be valid column names") vals_list = [] mask_list = [] for name in colnames: if vals and name in vals: vals_list.append(vals[name]) mask_list.append(False if mask is None else mask[name]) else: col = self[name] if hasattr(col, "dtype"): # Make a placeholder zero element of the right type which is masked. # This assumes the appropriate insert() method will broadcast a # numpy scalar to the right shape. vals_list.append(np.zeros(shape=(), dtype=col.dtype)) # For masked table any unsupplied values are masked by default. mask_list.append(self.masked and vals is not None) else: raise ValueError(f"Value must be supplied for column '{name}'") vals = vals_list mask = mask_list if isiterable(vals): if mask is not None and (not isiterable(mask) or isinstance(mask, Mapping)): raise TypeError("Mismatch between type of vals and mask") if len(self.columns) != len(vals): raise ValueError("Mismatch between number of vals and columns") if mask is not None: if len(self.columns) != len(mask): raise ValueError("Mismatch between number of masks and columns") else: mask = [False] * len(self.columns) else: raise TypeError("Vals must be an iterable or mapping or None") # Insert val at index for each column columns = self.TableColumns() for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask): try: # If new val is masked and the existing column does not support masking # then upgrade the column to a mask-enabled type: either the table-level # default ColumnClass or else MaskedColumn. 
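                # Illustrative sketch (comments only) of the upgrade performed
                # below for a plain, non-masked column when the table-level
                # ColumnClass is not already a masked type:
                #
                #     col = Column([1, 2, 3], name='a')
                #     col = MaskedColumn(col, copy=False)
                #
                # after which ``col.insert(index, val)`` can hold the new row
                # and the inserted element can be set to ``np.ma.masked``.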
if ( mask_ and isinstance(col, Column) and not isinstance(col, MaskedColumn) ): col_cls = ( self.ColumnClass if issubclass(self.ColumnClass, self.MaskedColumn) else self.MaskedColumn ) col = col_cls(col, copy=False) newcol = col.insert(index, val, axis=0) if len(newcol) != N + 1: raise ValueError( "Incorrect length for column {} after inserting {}" " (expected {}, got {})".format(name, val, len(newcol), N + 1) ) newcol.info.parent_table = self # Set mask if needed and possible if mask_: if hasattr(newcol, "mask"): newcol[index] = np.ma.masked else: raise TypeError( "mask was supplied for column '{}' but it does not " "support masked values".format(col.info.name) ) columns[name] = newcol except Exception as err: raise ValueError( "Unable to insert row because of exception in column '{}':\n{}".format( name, err ) ) from err for table_index in self.indices: table_index.insert_row(index, vals, self.columns.values()) self._replace_cols(columns) # Revert groups to default (ungrouped) state if hasattr(self, "_groups"): del self._groups def _replace_cols(self, columns): for col, new_col in zip(self.columns.values(), columns.values()): new_col.info.indices = [] for index in col.info.indices: index.columns[index.col_position(col.info.name)] = new_col new_col.info.indices.append(index) self.columns = columns def update(self, other, copy=True): """ Perform a dictionary-style update and merge metadata. The argument ``other`` must be a |Table|, or something that can be used to initialize a table. Columns from (possibly converted) ``other`` are added to this table. In case of matching column names the column from this table is replaced with the one from ``other``. If ``other`` is a |Table| instance then ``|=`` is available as alternate syntax for in-place update and ``|`` can be used merge data to a new table. Parameters ---------- other : table-like Data to update this table with. copy : bool Whether the updated columns should be copies of or references to the originals. See Also -------- add_columns, astropy.table.hstack, replace_column Examples -------- Update a table with another table:: >>> t1 = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}, meta={'i': 0}) >>> t2 = Table({'b': [1., 2.], 'c': [7., 11.]}, meta={'n': 2}) >>> t1.update(t2) >>> t1 <Table length=2> a b c str3 float64 float64 ---- ------- ------- foo 1.0 7.0 bar 2.0 11.0 >>> t1.meta {'i': 0, 'n': 2} Update a table with a dictionary:: >>> t = Table({'a': ['foo', 'bar'], 'b': [0., 0.]}) >>> t.update({'b': [1., 2.]}) >>> t <Table length=2> a b str3 float64 ---- ------- foo 1.0 bar 2.0 """ from .operations import _merge_table_meta if not isinstance(other, Table): other = self.__class__(other, copy=copy) common_cols = set(self.colnames).intersection(other.colnames) for name, col in other.items(): if name in common_cols: self.replace_column(name, col, copy=copy) else: self.add_column(col, name=name, copy=copy) _merge_table_meta(self, [self, other], metadata_conflicts="silent") def argsort(self, keys=None, kind=None, reverse=False): """ Return the indices which would sort the table according to one or more key columns. This simply calls the `numpy.argsort` function on the table with the ``order`` parameter set to ``keys``. Parameters ---------- keys : str or list of str The column name(s) to order the table by kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm used by ``numpy.argsort``. 
reverse : bool Sort in reverse order (default=False) Returns ------- index_array : ndarray, int Array of indices that sorts the table by the specified key column(s). """ if isinstance(keys, str): keys = [keys] # use index sorted order if possible if keys is not None: index = get_index(self, names=keys) if index is not None: idx = np.asarray(index.sorted_data()) return idx[::-1] if reverse else idx kwargs = {} if keys: # For multiple keys return a structured array which gets sorted, # while for a single key return a single ndarray. Sorting a # one-column structured array is slower than ndarray (e.g. a # factor of ~6 for a 10 million long random array), and much slower # for in principle sortable columns like Time, which get stored as # object arrays. if len(keys) > 1: kwargs["order"] = keys data = self.as_array(names=keys) else: data = self[keys[0]] else: # No keys provided so sort on all columns. data = self.as_array() if kind: kwargs["kind"] = kind # np.argsort will look for a possible .argsort method (e.g., for Time), # and if that fails cast to an array and try sorting that way. idx = np.argsort(data, **kwargs) return idx[::-1] if reverse else idx def sort(self, keys=None, *, kind=None, reverse=False): """ Sort the table according to one or more keys. This operates on the existing table and does not return a new table. Parameters ---------- keys : str or list of str The key(s) to order the table by. If None, use the primary index of the Table. kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional Sorting algorithm used by ``numpy.argsort``. reverse : bool Sort in reverse order (default=False) Examples -------- Create a table with 3 columns:: >>> t = Table([['Max', 'Jo', 'John'], ['Miller', 'Miller', 'Jackson'], ... [12, 15, 18]], names=('firstname', 'name', 'tel')) >>> print(t) firstname name tel --------- ------- --- Max Miller 12 Jo Miller 15 John Jackson 18 Sorting according to standard sorting rules, first 'name' then 'firstname':: >>> t.sort(['name', 'firstname']) >>> print(t) firstname name tel --------- ------- --- John Jackson 18 Jo Miller 15 Max Miller 12 Sorting according to standard sorting rules, first 'firstname' then 'tel', in reverse order:: >>> t.sort(['firstname', 'tel'], reverse=True) >>> print(t) firstname name tel --------- ------- --- Max Miller 12 John Jackson 18 Jo Miller 15 """ if keys is None: if not self.indices: raise ValueError("Table sort requires input keys or a table index") keys = [x.info.name for x in self.indices[0].columns] if isinstance(keys, str): keys = [keys] indexes = self.argsort(keys, kind=kind, reverse=reverse) with self.index_mode("freeze"): for name, col in self.columns.items(): # Make a new sorted column. This requires that take() also copies # relevant info attributes for mixin columns. new_col = col.take(indexes, axis=0) # First statement in try: will succeed if the column supports an in-place # update, and matches the legacy behavior of astropy Table. However, # some mixin classes may not support this, so in that case just drop # in the entire new column. See #9553 and #9536 for discussion. try: col[:] = new_col except Exception: # In-place update failed for some reason, exception class not # predictable for arbitrary mixin. self[col.info.name] = new_col def reverse(self): """ Reverse the row order of table rows. The table is reversed in place and there are no function arguments. Examples -------- Create a table with three columns:: >>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'], ... 
[12,15,18]], names=('firstname','name','tel')) >>> print(t) firstname name tel --------- ------- --- Max Miller 12 Jo Miller 15 John Jackson 18 Reversing order:: >>> t.reverse() >>> print(t) firstname name tel --------- ------- --- John Jackson 18 Jo Miller 15 Max Miller 12 """ for col in self.columns.values(): # First statement in try: will succeed if the column supports an in-place # update, and matches the legacy behavior of astropy Table. However, # some mixin classes may not support this, so in that case just drop # in the entire new column. See #9836, #9553, and #9536 for discussion. new_col = col[::-1] try: col[:] = new_col except Exception: # In-place update failed for some reason, exception class not # predictable for arbitrary mixin. self[col.info.name] = new_col for index in self.indices: index.reverse() def round(self, decimals=0): """ Round numeric columns in-place to the specified number of decimals. Non-numeric columns will be ignored. Examples -------- Create three columns with different types: >>> t = Table([[1, 4, 5], [-25.55, 12.123, 85], ... ['a', 'b', 'c']], names=('a', 'b', 'c')) >>> print(t) a b c --- ------ --- 1 -25.55 a 4 12.123 b 5 85.0 c Round them all to 0: >>> t.round(0) >>> print(t) a b c --- ----- --- 1 -26.0 a 4 12.0 b 5 85.0 c Round column 'a' to -1 decimal: >>> t.round({'a':-1}) >>> print(t) a b c --- ----- --- 0 -26.0 a 0 12.0 b 0 85.0 c Parameters ---------- decimals: int, dict Number of decimals to round the columns to. If a dict is given, the columns will be rounded to the number specified as the value. If a certain column is not in the dict given, it will remain the same. """ if isinstance(decimals, Mapping): decimal_values = decimals.values() column_names = decimals.keys() elif isinstance(decimals, int): decimal_values = itertools.repeat(decimals) column_names = self.colnames else: raise ValueError("'decimals' argument must be an int or a dict") for colname, decimal in zip(column_names, decimal_values): col = self.columns[colname] if np.issubdtype(col.info.dtype, np.number): try: np.around(col, decimals=decimal, out=col) except TypeError: # Bug in numpy see https://github.com/numpy/numpy/issues/15438 col[()] = np.around(col, decimals=decimal) def copy(self, copy_data=True): """ Return a copy of the table. Parameters ---------- copy_data : bool If `True` (the default), copy the underlying data array. Otherwise, use the same data array. The ``meta`` is always deepcopied regardless of the value for ``copy_data``. """ out = self.__class__(self, copy=copy_data) # If the current table is grouped then do the same in the copy if hasattr(self, "_groups"): out._groups = groups.TableGroups( out, indices=self._groups._indices, keys=self._groups._keys ) return out def __deepcopy__(self, memo=None): return self.copy(True) def __copy__(self): return self.copy(False) def __lt__(self, other): return super().__lt__(other) def __gt__(self, other): return super().__gt__(other) def __le__(self, other): return super().__le__(other) def __ge__(self, other): return super().__ge__(other) def __eq__(self, other): return self._rows_equal(other) def __ne__(self, other): return ~self.__eq__(other) def _rows_equal(self, other): """ Row-wise comparison of table with any other object. This is actual implementation for __eq__. Returns a 1-D boolean numpy array showing result of row-wise comparison. This is the same as the ``==`` comparison for tables. 
Parameters ---------- other : Table or DataFrame or ndarray An object to compare with table Examples -------- Comparing one Table with other:: >>> t1 = Table([[1,2],[4,5],[7,8]], names=('a','b','c')) >>> t2 = Table([[1,2],[4,5],[7,8]], names=('a','b','c')) >>> t1._rows_equal(t2) array([ True, True]) """ if isinstance(other, Table): other = other.as_array() if self.has_masked_columns: if isinstance(other, np.ma.MaskedArray): result = self.as_array() == other else: # If mask is True, then by definition the row doesn't match # because the other array is not masked. false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names]) result = (self.as_array().data == other) & (self.mask == false_mask) else: if isinstance(other, np.ma.MaskedArray): # If mask is True, then by definition the row doesn't match # because the other array is not masked. false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names]) result = (self.as_array() == other.data) & (other.mask == false_mask) else: result = self.as_array() == other return result def values_equal(self, other): """ Element-wise comparison of table with another table, list, or scalar. Returns a ``Table`` with the same columns containing boolean values showing result of comparison. Parameters ---------- other : table-like object or list or scalar Object to compare with table Examples -------- Compare one Table with other:: >>> t1 = Table([[1, 2], [4, 5], [-7, 8]], names=('a', 'b', 'c')) >>> t2 = Table([[1, 2], [-4, 5], [7, 8]], names=('a', 'b', 'c')) >>> t1.values_equal(t2) <Table length=2> a b c bool bool bool ---- ----- ----- True False False True True True """ if isinstance(other, Table): names = other.colnames else: try: other = Table(other, copy=False) names = other.colnames except Exception: # Broadcast other into a dict, so e.g. other = 2 will turn into # other = {'a': 2, 'b': 2} and then equality does a # column-by-column broadcasting. names = self.colnames other = {name: other for name in names} # Require column names match but do not require same column order if set(self.colnames) != set(names): raise ValueError("cannot compare tables with different column names") eqs = [] for name in names: try: np.broadcast(self[name], other[name]) # Check if broadcast-able # Catch the numpy FutureWarning related to equality checking, # "elementwise comparison failed; returning scalar instead, but # in the future will perform elementwise comparison". Turn this # into an exception since the scalar answer is not what we want. with warnings.catch_warnings(record=True) as warns: warnings.simplefilter("always") eq = self[name] == other[name] if ( warns and issubclass(warns[-1].category, FutureWarning) and "elementwise comparison failed" in str(warns[-1].message) ): raise FutureWarning(warns[-1].message) except Exception as err: raise ValueError(f"unable to compare column {name}") from err # Be strict about the result from the comparison. E.g. SkyCoord __eq__ is just # broken and completely ignores that it should return an array. if not ( isinstance(eq, np.ndarray) and eq.dtype is np.dtype("bool") and len(eq) == len(self) ): raise TypeError( f"comparison for column {name} returned {eq} " "instead of the expected boolean ndarray" ) eqs.append(eq) out = Table(eqs, names=names) return out @property def groups(self): if not hasattr(self, "_groups"): self._groups = groups.TableGroups(self) return self._groups def group_by(self, keys): """ Group this table by the specified ``keys``. 
This effectively splits the table into groups which correspond to unique values of the ``keys`` grouping object. The output is a new `~astropy.table.TableGroups` which contains a copy of this table but sorted by row according to ``keys``. The ``keys`` input to `group_by` can be specified in different ways: - String or list of strings corresponding to table column name(s) - Numpy array (homogeneous or structured) with same length as this table - `~astropy.table.Table` with same length as this table Parameters ---------- keys : str, list of str, numpy array, or `~astropy.table.Table` Key grouping object Returns ------- out : `~astropy.table.Table` New table with groups set """ return groups.table_group_by(self, keys) def to_pandas(self, index=None, use_nullable_int=True): """ Return a :class:`pandas.DataFrame` instance. The index of the created DataFrame is controlled by the ``index`` argument. For ``index=True`` or the default ``None``, an index will be specified for the DataFrame if there is a primary key index on the Table *and* if it corresponds to a single column. If ``index=False`` then no DataFrame index will be specified. If ``index`` is the name of a column in the table then that will be the DataFrame index. In addition to vanilla columns or masked columns, this supports Table mixin columns like Quantity, Time, or SkyCoord. In many cases these objects have no analog in pandas and will be converted to a "encoded" representation using only Column or MaskedColumn. The exception is Time or TimeDelta columns, which will be converted to the corresponding representation in pandas using ``np.datetime64`` or ``np.timedelta64``. See the example below. Parameters ---------- index : None, bool, str Specify DataFrame index mode use_nullable_int : bool, default=True Convert integer MaskedColumn to pandas nullable integer type. If ``use_nullable_int=False`` or the pandas version does not support nullable integer types (version < 0.24), then the column is converted to float with NaN for missing elements and a warning is issued. Returns ------- dataframe : :class:`pandas.DataFrame` A pandas :class:`pandas.DataFrame` instance Raises ------ ImportError If pandas is not installed ValueError If the Table has multi-dimensional columns Examples -------- Here we convert a table with a few mixins to a :class:`pandas.DataFrame` instance. >>> import pandas as pd >>> from astropy.table import QTable >>> import astropy.units as u >>> from astropy.time import Time, TimeDelta >>> from astropy.coordinates import SkyCoord >>> q = [1, 2] * u.m >>> tm = Time([1998, 2002], format='jyear') >>> sc = SkyCoord([5, 6], [7, 8], unit='deg') >>> dt = TimeDelta([3, 200] * u.s) >>> t = QTable([q, tm, sc, dt], names=['q', 'tm', 'sc', 'dt']) >>> df = t.to_pandas(index='tm') >>> with pd.option_context('display.max_columns', 20): ... print(df) q sc.ra sc.dec dt tm 1998-01-01 1.0 5.0 7.0 0 days 00:00:03 2002-01-01 2.0 6.0 8.0 0 days 00:03:20 """ from pandas import DataFrame, Series if index is not False: if index in (None, True): # Default is to use the table primary key if available and a single column if self.primary_key and len(self.primary_key) == 1: index = self.primary_key[0] else: index = False else: if index not in self.colnames: raise ValueError( "index must be None, False, True or a table column name" ) def _encode_mixins(tbl): """Encode a Table ``tbl`` that may have mixin columns to a Table with only astropy Columns + appropriate meta-data to allow subsequent decoding. 
""" from astropy.time import TimeBase, TimeDelta from . import serialize # Convert any Time or TimeDelta columns and pay attention to masking time_cols = [col for col in tbl.itercols() if isinstance(col, TimeBase)] if time_cols: # Make a light copy of table and clear any indices new_cols = [] for col in tbl.itercols(): new_col = ( col_copy(col, copy_indices=False) if col.info.indices else col ) new_cols.append(new_col) tbl = tbl.__class__(new_cols, copy=False) # Certain subclasses (e.g. TimeSeries) may generate new indices on # table creation, so make sure there are no indices on the table. for col in tbl.itercols(): col.info.indices.clear() for col in time_cols: if isinstance(col, TimeDelta): # Convert to nanoseconds (matches astropy datetime64 support) new_col = (col.sec * 1e9).astype("timedelta64[ns]") nat = np.timedelta64("NaT") else: new_col = col.datetime64.copy() nat = np.datetime64("NaT") if col.masked: new_col[col.mask] = nat tbl[col.info.name] = new_col # Convert the table to one with no mixins, only Column objects. encode_tbl = serialize.represent_mixins_as_columns(tbl) return encode_tbl tbl = _encode_mixins(self) badcols = [name for name, col in self.columns.items() if len(col.shape) > 1] if badcols: # fmt: off raise ValueError( f'Cannot convert a table with multidimensional columns to a ' f'pandas DataFrame. Offending columns are: {badcols}\n' f'One can filter out such columns using:\n' f'names = [name for name in tbl.colnames if len(tbl[name].shape) <= 1]\n' f'tbl[names].to_pandas(...)' ) # fmt: on out = OrderedDict() for name, column in tbl.columns.items(): if getattr(column.dtype, "isnative", True): out[name] = column else: out[name] = column.data.byteswap().newbyteorder("=") if isinstance(column, MaskedColumn) and np.any(column.mask): if column.dtype.kind in ["i", "u"]: pd_dtype = column.dtype.name if use_nullable_int: # Convert int64 to Int64, uint32 to UInt32, etc for nullable types pd_dtype = pd_dtype.replace("i", "I").replace("u", "U") out[name] = Series(out[name], dtype=pd_dtype) # If pandas is older than 0.24 the type may have turned to float if column.dtype.kind != out[name].dtype.kind: warnings.warn( f"converted column '{name}' from {column.dtype} to" f" {out[name].dtype}", TableReplaceWarning, stacklevel=3, ) elif column.dtype.kind not in ["f", "c"]: out[name] = column.astype(object).filled(np.nan) kwargs = {} if index: idx = out.pop(index) kwargs["index"] = idx # We add the table index to Series inputs (MaskedColumn with int values) to override # its default RangeIndex, see #11432 for v in out.values(): if isinstance(v, Series): v.index = idx df = DataFrame(out, **kwargs) if index: # Explicitly set the pandas DataFrame index to the original table # index name. df.index.name = idx.info.name return df @classmethod def from_pandas(cls, dataframe, index=False, units=None): """ Create a `~astropy.table.Table` from a :class:`pandas.DataFrame` instance. In addition to converting generic numeric or string columns, this supports conversion of pandas Date and Time delta columns to `~astropy.time.Time` and `~astropy.time.TimeDelta` columns, respectively. Parameters ---------- dataframe : :class:`pandas.DataFrame` A pandas :class:`pandas.DataFrame` instance index : bool Include the index column in the returned table (default=False) units: dict A dict mapping column names to to a `~astropy.units.Unit`. The columns will have the specified unit in the Table. 
Returns ------- table : `~astropy.table.Table` A `~astropy.table.Table` (or subclass) instance Raises ------ ImportError If pandas is not installed Examples -------- Here we convert a :class:`pandas.DataFrame` instance to a `~astropy.table.QTable`. >>> import numpy as np >>> import pandas as pd >>> from astropy.table import QTable >>> time = pd.Series(['1998-01-01', '2002-01-01'], dtype='datetime64[ns]') >>> dt = pd.Series(np.array([1, 300], dtype='timedelta64[s]')) >>> df = pd.DataFrame({'time': time}) >>> df['dt'] = dt >>> df['x'] = [3., 4.] >>> with pd.option_context('display.max_columns', 20): ... print(df) time dt x 0 1998-01-01 0 days 00:00:01 3.0 1 2002-01-01 0 days 00:05:00 4.0 >>> QTable.from_pandas(df) <QTable length=2> time dt x Time TimeDelta float64 ----------------------- --------- ------- 1998-01-01T00:00:00.000 1.0 3.0 2002-01-01T00:00:00.000 300.0 4.0 """ out = OrderedDict() names = list(dataframe.columns) columns = [dataframe[name] for name in names] datas = [np.array(column) for column in columns] masks = [np.array(column.isnull()) for column in columns] if index: index_name = dataframe.index.name or "index" while index_name in names: index_name = "_" + index_name + "_" names.insert(0, index_name) columns.insert(0, dataframe.index) datas.insert(0, np.array(dataframe.index)) masks.insert(0, np.zeros(len(dataframe), dtype=bool)) if units is None: units = [None] * len(names) else: if not isinstance(units, Mapping): raise TypeError('Expected a Mapping "column-name" -> "unit"') not_found = set(units.keys()) - set(names) if not_found: warnings.warn(f"`units` contains additional columns: {not_found}") units = [units.get(name) for name in names] for name, column, data, mask, unit in zip(names, columns, datas, masks, units): if column.dtype.kind in ["u", "i"] and np.any(mask): # Special-case support for pandas nullable int np_dtype = str(column.dtype).lower() data = np.zeros(shape=column.shape, dtype=np_dtype) data[~mask] = column[~mask] out[name] = MaskedColumn( data=data, name=name, mask=mask, unit=unit, copy=False ) continue if data.dtype.kind == "O": # If all elements of an object array are string-like or np.nan # then coerce back to a native numpy str/unicode array. string_types = (str, bytes) nan = np.nan if all(isinstance(x, string_types) or x is nan for x in data): # Force any missing (null) values to b''. Numpy will # upcast to str/unicode as needed. data[mask] = b"" # When the numpy object array is represented as a list then # numpy initializes to the correct string or unicode type. data = np.array([x for x in data]) # Numpy datetime64 if data.dtype.kind == "M": from astropy.time import Time out[name] = Time(data, format="datetime64") if np.any(mask): out[name][mask] = np.ma.masked out[name].format = "isot" # Numpy timedelta64 elif data.dtype.kind == "m": from astropy.time import TimeDelta data_sec = data.astype("timedelta64[ns]").astype(np.float64) / 1e9 out[name] = TimeDelta(data_sec, format="sec") if np.any(mask): out[name][mask] = np.ma.masked else: if np.any(mask): out[name] = MaskedColumn(data=data, name=name, mask=mask, unit=unit) else: out[name] = Column(data=data, name=name, unit=unit) return cls(out) info = TableInfo() class QTable(Table): """A class to represent tables of heterogeneous data. `~astropy.table.QTable` provides a class for heterogeneous tabular data which can be easily modified, for instance adding columns or new rows. 
The `~astropy.table.QTable` class is identical to `~astropy.table.Table` except that columns with an associated ``unit`` attribute are converted to `~astropy.units.Quantity` objects. For more information see: - https://docs.astropy.org/en/stable/table/ - https://docs.astropy.org/en/stable/table/mixin_columns.html Parameters ---------- data : numpy ndarray, dict, list, table-like object, optional Data to initialize table. masked : bool, optional Specify whether the table is masked. names : list, optional Specify column names. dtype : list, optional Specify column data types. meta : dict, optional Metadata associated with the table. copy : bool, optional Copy the input data. Default is True. rows : numpy ndarray, list of list, optional Row-oriented data for table instead of ``data`` argument. copy_indices : bool, optional Copy any indices in the input data. Default is True. **kwargs : dict, optional Additional keyword args when converting table-like object. """ def _is_mixin_for_table(self, col): """ Determine if ``col`` should be added to the table directly as a mixin column. """ return has_info_class(col, MixinInfo) def _convert_col_for_table(self, col): if isinstance(col, Column) and getattr(col, "unit", None) is not None: # We need to turn the column into a quantity; use subok=True to allow # Quantity subclasses identified in the unit (such as u.mag()). q_cls = Masked(Quantity) if isinstance(col, MaskedColumn) else Quantity try: qcol = q_cls(col.data, col.unit, copy=False, subok=True) except Exception as exc: warnings.warn( f"column {col.info.name} has a unit but is kept as " f"a {col.__class__.__name__} as an attempt to " f"convert it to Quantity failed with:\n{exc!r}", AstropyUserWarning, ) else: qcol.info = col.info qcol.info.indices = col.info.indices col = qcol else: col = super()._convert_col_for_table(col) return col
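# Usage sketch (illustrative, not part of the module API): the conversion
# above is what makes QTable differ from Table for columns that carry a unit:
#
#     import astropy.units as u
#     from astropy.table import QTable, Table
#
#     qt = QTable()
#     qt['v'] = [1.0, 2.0] * u.km / u.s   # stored as a Quantity column
#     t = Table(qt)                       # same data as a Column with .unit set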
844af8d0550ba2e472ae5c3c982a0b0d71dbdf7921e8dd0b4d5415af534d335f
"""High-level table operations. - join() - setdiff() - hstack() - vstack() - dstack() """ # Licensed under a 3-clause BSD style license - see LICENSE.rst import collections import itertools from collections import Counter, OrderedDict from collections.abc import Mapping, Sequence from copy import deepcopy import numpy as np from astropy.units import Quantity from astropy.utils import metadata from astropy.utils.masked import Masked from . import _np_utils from .np_utils import TableMergeError from .table import Column, MaskedColumn, QTable, Row, Table __all__ = [ "join", "setdiff", "hstack", "vstack", "unique", "join_skycoord", "join_distance", ] __doctest_requires__ = {"join_skycoord": ["scipy"], "join_distance": ["scipy"]} def _merge_table_meta(out, tables, metadata_conflicts="warn"): out_meta = deepcopy(tables[0].meta) for table in tables[1:]: out_meta = metadata.merge( out_meta, table.meta, metadata_conflicts=metadata_conflicts ) out.meta.update(out_meta) def _get_list_of_tables(tables): """ Check that tables is a Table or sequence of Tables. Returns the corresponding list of Tables. """ # Make sure we have a list of things if not isinstance(tables, Sequence): tables = [tables] # Make sure there is something to stack if len(tables) == 0: raise ValueError("no values provided to stack.") # Convert inputs (Table, Row, or anything column-like) to Tables. # Special case that Quantity converts to a QTable. for ii, val in enumerate(tables): if isinstance(val, Table): pass elif isinstance(val, Row): tables[ii] = Table(val) elif isinstance(val, Quantity): tables[ii] = QTable([val]) else: try: tables[ii] = Table([val]) except (ValueError, TypeError) as err: raise TypeError(f"Cannot convert {val} to table column.") from err return tables def _get_out_class(objs): """ From a list of input objects ``objs`` get merged output object class. This is just taken as the deepest subclass. This doesn't handle complicated inheritance schemes, but as a special case, classes which share ``info`` are taken to be compatible. """ out_class = objs[0].__class__ for obj in objs[1:]: if issubclass(obj.__class__, out_class): out_class = obj.__class__ if any( not ( issubclass(out_class, obj.__class__) or out_class.info is obj.__class__.info ) for obj in objs ): raise ValueError( f"unmergeable object classes {[type(obj).__name__ for obj in objs]}" ) return out_class def join_skycoord(distance, distance_func="search_around_sky"): """Helper function to join on SkyCoord columns using distance matching. This function is intended for use in ``table.join()`` to allow performing a table join where the key columns are both ``SkyCoord`` objects, matched by computing the distance between points and accepting values below ``distance``. The distance cross-matching is done using either `~astropy.coordinates.search_around_sky` or `~astropy.coordinates.search_around_3d`, depending on the value of ``distance_func``. The default is ``'search_around_sky'``. One can also provide a function object for ``distance_func``, in which case it must be a function that follows the same input and output API as `~astropy.coordinates.search_around_sky`. In this case the function will be called with ``(skycoord1, skycoord2, distance)`` as arguments. Parameters ---------- distance : `~astropy.units.Quantity` ['angle', 'length'] Maximum distance between points to be considered a join match. Must have angular or distance units. distance_func : str or function Specifies the function for performing the cross-match based on ``distance``. 
If supplied as a string this specifies the name of a function in `astropy.coordinates`. If supplied as a function then that function is called directly. Returns ------- join_func : function Function that accepts two ``SkyCoord`` columns (col1, col2) and returns the tuple (ids1, ids2) of pair-matched unique identifiers. Examples -------- This example shows an inner join of two ``SkyCoord`` columns, taking any sources within 0.2 deg to be a match. Note the new ``sc_id`` column which is added and provides a unique source identifier for the matches. >>> from astropy.coordinates import SkyCoord >>> import astropy.units as u >>> from astropy.table import Table, join_skycoord >>> from astropy import table >>> sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg') >>> sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg') >>> join_func = join_skycoord(0.2 * u.deg) >>> join_func(sc1, sc2) # Associate each coordinate with unique source ID (array([3, 1, 1, 2]), array([4, 1, 2])) >>> t1 = Table([sc1], names=['sc']) >>> t2 = Table([sc2], names=['sc']) >>> t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)}) >>> print(t12) # Note new `sc_id` column with the IDs from join_func() sc_id sc_1 sc_2 deg,deg deg,deg ----- ------- -------- 1 1.0,0.0 1.05,0.0 1 1.1,0.0 1.05,0.0 2 2.0,0.0 2.1,0.0 """ if isinstance(distance_func, str): import astropy.coordinates as coords try: distance_func = getattr(coords, distance_func) except AttributeError as err: raise ValueError( "distance_func must be a function in astropy.coordinates" ) from err else: from inspect import isfunction if not isfunction(distance_func): raise ValueError("distance_func must be a str or function") def join_func(sc1, sc2): # Call the appropriate SkyCoord method to find pairs within distance idxs1, idxs2, d2d, d3d = distance_func(sc1, sc2, distance) # Now convert that into unique identifiers for each near-pair. This is # taken to be transitive, so that if points 1 and 2 are "near" and points # 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier. # This identifier will then be used in the table join matching. # Identifiers for each column, initialized to all zero. ids1 = np.zeros(len(sc1), dtype=int) ids2 = np.zeros(len(sc2), dtype=int) # Start the identifier count at 1 id_ = 1 for idx1, idx2 in zip(idxs1, idxs2): # If this col1 point is previously identified then set corresponding # col2 point to same identifier. Likewise for col2 and col1. if ids1[idx1] > 0: ids2[idx2] = ids1[idx1] elif ids2[idx2] > 0: ids1[idx1] = ids2[idx2] else: # Not yet seen so set identifier for col1 and col2 ids1[idx1] = id_ ids2[idx2] = id_ id_ += 1 # Fill in unique identifiers for points with no near neighbor for ids in (ids1, ids2): for idx in np.flatnonzero(ids == 0): ids[idx] = id_ id_ += 1 # End of enclosure join_func() return ids1, ids2 return join_func def join_distance(distance, kdtree_args=None, query_args=None): """Helper function to join table columns using distance matching. This function is intended for use in ``table.join()`` to allow performing a table join where the key columns are matched by computing the distance between points and accepting values below ``distance``. This numerical "fuzzy" match can apply to 1-D or 2-D columns, where in the latter case the distance is a vector distance. The distance cross-matching is done using `scipy.spatial.cKDTree`. If necessary you can tweak the default behavior by providing ``dict`` values for the ``kdtree_args`` or ``query_args``. 
Parameters ---------- distance : float or `~astropy.units.Quantity` ['length'] Maximum distance between points to be considered a join match kdtree_args : dict, None Optional extra args for `~scipy.spatial.cKDTree` query_args : dict, None Optional extra args for `~scipy.spatial.cKDTree.query_ball_tree` Returns ------- join_func : function Function that accepts (skycoord1, skycoord2) and returns the tuple (ids1, ids2) of pair-matched unique identifiers. Examples -------- >>> from astropy.table import Table, join_distance >>> from astropy import table >>> c1 = [0, 1, 1.1, 2] >>> c2 = [0.5, 1.05, 2.1] >>> t1 = Table([c1], names=['col']) >>> t2 = Table([c2], names=['col']) >>> t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_distance(0.2)}) >>> print(t12) col_id col_1 col_2 ------ ----- ----- 1 1.0 1.05 1 1.1 1.05 2 2.0 2.1 3 0.0 -- 4 -- 0.5 """ try: from scipy.spatial import cKDTree except ImportError as exc: raise ImportError("scipy is required to use join_distance()") from exc if kdtree_args is None: kdtree_args = {} if query_args is None: query_args = {} def join_func(col1, col2): if col1.ndim > 2 or col2.ndim > 2: raise ValueError("columns for isclose_join must be 1- or 2-dimensional") if isinstance(distance, Quantity): # Convert to np.array with common unit col1 = col1.to_value(distance.unit) col2 = col2.to_value(distance.unit) dist = distance.value else: # Convert to np.array to allow later in-place shape changing col1 = np.asarray(col1) col2 = np.asarray(col2) dist = distance # Ensure columns are pure np.array and are 2-D for use with KDTree if col1.ndim == 1: col1.shape = col1.shape + (1,) if col2.ndim == 1: col2.shape = col2.shape + (1,) # Cross-match col1 and col2 within dist using KDTree kd1 = cKDTree(col1, **kdtree_args) kd2 = cKDTree(col2, **kdtree_args) nears = kd1.query_ball_tree(kd2, r=dist, **query_args) # Output of above is nears which is a list of lists, where the outer # list corresponds to each item in col1, and where the inner lists are # indexes into col2 of elements within the distance tolerance. This # identifies col1 / col2 near pairs. # Now convert that into unique identifiers for each near-pair. This is # taken to be transitive, so that if points 1 and 2 are "near" and points # 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier. # This identifier will then be used in the table join matching. # Identifiers for each column, initialized to all zero. ids1 = np.zeros(len(col1), dtype=int) ids2 = np.zeros(len(col2), dtype=int) # Start the identifier count at 1 id_ = 1 for idx1, idxs2 in enumerate(nears): for idx2 in idxs2: # If this col1 point is previously identified then set corresponding # col2 point to same identifier. Likewise for col2 and col1. if ids1[idx1] > 0: ids2[idx2] = ids1[idx1] elif ids2[idx2] > 0: ids1[idx1] = ids2[idx2] else: # Not yet seen so set identifier for col1 and col2 ids1[idx1] = id_ ids2[idx2] = id_ id_ += 1 # Fill in unique identifiers for points with no near neighbor for ids in (ids1, ids2): for idx in np.flatnonzero(ids == 0): ids[idx] = id_ id_ += 1 # End of enclosure join_func() return ids1, ids2 return join_func def join( left, right, keys=None, join_type="inner", *, keys_left=None, keys_right=None, uniq_col_name="{col_name}_{table_name}", table_names=["1", "2"], metadata_conflicts="warn", join_funcs=None, ): """ Perform a join of the left table with the right table on specified keys. Parameters ---------- left : `~astropy.table.Table`-like object Left side table in the join. 
If not a Table, will call ``Table(left)`` right : `~astropy.table.Table`-like object Right side table in the join. If not a Table, will call ``Table(right)`` keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns which are common to both tables. join_type : str Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner' keys_left : str or list of str or list of column-like, optional Left column(s) used to match rows instead of ``keys`` arg. This can be a single left table column name or list of column names, or a list of column-like values with the same lengths as the left table. keys_right : str or list of str or list of column-like, optional Same as ``keys_left``, but for the right side of the join. uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2']. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. join_funcs : dict, None Dict of functions to use for matching the corresponding key column(s). See `~astropy.table.join_skycoord` for an example and details. Returns ------- joined_table : `~astropy.table.Table` object New table containing the result of the join operation. """ # Try converting inputs to Table as needed if not isinstance(left, Table): left = Table(left) if not isinstance(right, Table): right = Table(right) col_name_map = OrderedDict() out = _join( left, right, keys, join_type, uniq_col_name, table_names, col_name_map, metadata_conflicts, join_funcs, keys_left=keys_left, keys_right=keys_right, ) # Merge the column and table meta data. Table subclasses might override # these methods for custom merge behavior. _merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts) return out def setdiff(table1, table2, keys=None): """ Take a set difference of table rows. The row set difference will contain all rows in ``table1`` that are not present in ``table2``. If the keys parameter is not defined, all columns in ``table1`` will be included in the output table. Parameters ---------- table1 : `~astropy.table.Table` ``table1`` is on the left side of the set difference. table2 : `~astropy.table.Table` ``table2`` is on the right side of the set difference. keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns in ``table1``. Returns ------- diff_table : `~astropy.table.Table` New table containing the set difference between tables. If the set difference is none, an empty table will be returned.
Examples -------- To get a set difference between two tables:: >>> from astropy.table import setdiff, Table >>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b')) >>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b')) >>> print(t1) a b --- --- 1 c 4 d 9 f >>> print(t2) a b --- --- 1 c 5 b 9 f >>> print(setdiff(t1, t2)) a b --- --- 4 d >>> print(setdiff(t2, t1)) a b --- --- 5 b """ if keys is None: keys = table1.colnames # Check that all keys are in table1 and table2 for tbl, tbl_str in ((table1, "table1"), (table2, "table2")): diff_keys = np.setdiff1d(keys, tbl.colnames) if len(diff_keys) != 0: raise ValueError( "The {} columns are missing from {}, cannot take " "a set difference.".format(diff_keys, tbl_str) ) # Make a light internal copy of both tables t1 = table1.copy(copy_data=False) t1.meta = {} t1.keep_columns(keys) t1["__index1__"] = np.arange(len(table1)) # Keep track of rows indices # Make a light internal copy to avoid touching table2 t2 = table2.copy(copy_data=False) t2.meta = {} t2.keep_columns(keys) # Dummy column to recover rows after join t2["__index2__"] = np.zeros(len(t2), dtype=np.uint8) # dummy column t12 = _join(t1, t2, join_type="left", keys=keys, metadata_conflicts="silent") # If t12 index2 is masked then that means some rows were in table1 but not table2. if hasattr(t12["__index2__"], "mask"): # Define bool mask of table1 rows not in table2 diff = t12["__index2__"].mask # Get the row indices of table1 for those rows idx = t12["__index1__"][diff] # Select corresponding table1 rows straight from table1 to ensure # correct table and column types. t12_diff = table1[idx] else: t12_diff = table1[[]] return t12_diff def dstack(tables, join_type="outer", metadata_conflicts="warn"): """ Stack columns within tables depth-wise. A ``join_type`` of 'exact' means that the tables must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Table(s) to stack along depth-wise with the current table Table columns should have same shape and name for depth-wise stacking join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. Examples -------- To stack two tables along rows do:: >>> from astropy.table import dstack, Table >>> t1 = Table({'a': [1., 2.], 'b': [3., 4.]}, names=('a', 'b')) >>> t2 = Table({'a': [5., 6.], 'b': [7., 8.]}, names=('a', 'b')) >>> print(t1) a b --- --- 1.0 3.0 2.0 4.0 >>> print(t2) a b --- --- 5.0 7.0 6.0 8.0 >>> print(dstack([t1, t2])) a b ---------- ---------- 1.0 .. 5.0 3.0 .. 7.0 2.0 .. 6.0 4.0 .. 
8.0 """ _check_join_type(join_type, "dstack") tables = _get_list_of_tables(tables) if len(tables) == 1: return tables[0] # no point in stacking a single table n_rows = {len(table) for table in tables} if len(n_rows) != 1: raise ValueError("Table lengths must all match for dstack") n_row = n_rows.pop() out = vstack(tables, join_type, metadata_conflicts) for name, col in out.columns.items(): col = out[name] # Reshape to so each original column is now in a row. # If entries are not 0-dim then those additional shape dims # are just carried along. # [x x x y y y] => [[x x x], # [y y y]] new_shape = (len(tables), n_row) + col.shape[1:] try: col.shape = (len(tables), n_row) + col.shape[1:] except AttributeError: col = col.reshape(new_shape) # Transpose the table and row axes to get to # [[x, y], # [x, y] # [x, y]] axes = np.arange(len(col.shape)) axes[:2] = [1, 0] # This temporarily makes `out` be corrupted (columns of different # length) but it all works out in the end. out.columns.__setitem__(name, col.transpose(axes), validated=True) return out def vstack(tables, join_type="outer", metadata_conflicts="warn"): """ Stack tables vertically (along rows). A ``join_type`` of 'exact' means that the tables must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Table(s) to stack along rows (vertically) with the current table join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. Examples -------- To stack two tables along rows do:: >>> from astropy.table import vstack, Table >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) >>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b')) >>> print(t1) a b --- --- 1 3 2 4 >>> print(t2) a b --- --- 5 7 6 8 >>> print(vstack([t1, t2])) a b --- --- 1 3 2 4 5 7 6 8 """ _check_join_type(join_type, "vstack") tables = _get_list_of_tables(tables) # validates input if len(tables) == 1: return tables[0] # no point in stacking a single table col_name_map = OrderedDict() out = _vstack(tables, join_type, col_name_map, metadata_conflicts) # Merge table metadata _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) return out def hstack( tables, join_type="outer", uniq_col_name="{col_name}_{table_name}", table_names=None, metadata_conflicts="warn", ): """ Stack tables along columns (horizontally). A ``join_type`` of 'exact' means that the tables must all have exactly the same number of rows. If ``join_type`` is 'inner' then the intersection of rows will be the output. A value of 'outer' (default) means the output will have the union of all rows, with table values being masked where no common values are available. 
Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Tables to stack along columns (horizontally) with the current table join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2', ..]. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. See Also -------- Table.add_columns, Table.replace_column, Table.update Examples -------- To stack two tables horizontally (along columns) do:: >>> from astropy.table import Table, hstack >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) >>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd')) >>> print(t1) a b --- --- 1 3 2 4 >>> print(t2) c d --- --- 5 7 6 8 >>> print(hstack([t1, t2])) a b c d --- --- --- --- 1 3 5 7 2 4 6 8 """ _check_join_type(join_type, "hstack") tables = _get_list_of_tables(tables) # validates input if len(tables) == 1: return tables[0] # no point in stacking a single table col_name_map = OrderedDict() out = _hstack(tables, join_type, uniq_col_name, table_names, col_name_map) _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) return out def unique(input_table, keys=None, silent=False, keep="first"): """ Returns the unique rows of a table. Parameters ---------- input_table : table-like keys : str or list of str Name(s) of column(s) used to create unique rows. Default is to use all columns. keep : {'first', 'last', 'none'} Whether to keep the first or last row for each set of duplicates. If 'none', all rows that are duplicate are removed, leaving only rows that are already unique in the input. Default is 'first'. silent : bool If `True`, masked value column(s) are silently removed from ``keys``. If `False`, an exception is raised when ``keys`` contains masked value column(s). Default is `False`. Returns ------- unique_table : `~astropy.table.Table` object New table containing only the unique rows of ``input_table``. Examples -------- >>> from astropy.table import unique, Table >>> import numpy as np >>> table = Table(data=[[1,2,3,2,3,3], ... [2,3,4,5,4,6], ... [3,4,5,6,7,8]], ... names=['col1', 'col2', 'col3'], ... 
dtype=[np.int32, np.int32, np.int32]) >>> table <Table length=6> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 3 4 5 2 5 6 3 4 7 3 6 8 >>> unique(table, keys='col1') <Table length=3> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 3 4 5 >>> unique(table, keys=['col1'], keep='last') <Table length=3> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 5 6 3 6 8 >>> unique(table, keys=['col1', 'col2']) <Table length=5> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 2 5 6 3 4 5 3 6 8 >>> unique(table, keys=['col1', 'col2'], keep='none') <Table length=4> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 2 5 6 3 6 8 >>> unique(table, keys=['col1'], keep='none') <Table length=1> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 """ if keep not in ("first", "last", "none"): raise ValueError("'keep' should be one of 'first', 'last', 'none'") if isinstance(keys, str): keys = [keys] if keys is None: keys = input_table.colnames else: if len(set(keys)) != len(keys): raise ValueError("duplicate key names") # Check for columns with masked values for key in keys[:]: col = input_table[key] if hasattr(col, "mask") and np.any(col.mask): if not silent: raise ValueError( "cannot use columns with masked values as keys; " "remove column '{}' from keys and rerun " "unique()".format(key) ) del keys[keys.index(key)] if len(keys) == 0: raise ValueError( "no column remained in ``keys``; " "unique() cannot work with masked value " "key columns" ) grouped_table = input_table.group_by(keys) indices = grouped_table.groups.indices if keep == "first": indices = indices[:-1] elif keep == "last": indices = indices[1:] - 1 else: indices = indices[:-1][np.diff(indices) == 1] return grouped_table[indices] def get_col_name_map( arrays, common_names, uniq_col_name="{col_name}_{table_name}", table_names=None ): """ Find the column names mapping when merging the list of tables ``arrays``. It is assumed that col names in ``common_names`` are to be merged into a single column while the rest will be uniquely represented in the output. The args ``uniq_col_name`` and ``table_names`` specify how to rename columns in case of conflicts. Returns a dict mapping each output column name to the input(s). This takes the form {outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names will be present, while for the other non-key columns the value will be (col_name_0, None, ..) or (None, col_name_1, ..) etc. """ col_name_map = collections.defaultdict(lambda: [None] * len(arrays)) col_name_list = [] if table_names is None: table_names = [str(ii + 1) for ii in range(len(arrays))] for idx, array in enumerate(arrays): table_name = table_names[idx] for name in array.colnames: out_name = name if name in common_names: # If name is in the list of common_names then insert into # the column name list, but just once. 
if name not in col_name_list: col_name_list.append(name) else: # If name is not one of the common column outputs, and it collides # with the names in one of the other arrays, then rename others = list(arrays) others.pop(idx) if any(name in other.colnames for other in others): out_name = uniq_col_name.format( table_name=table_name, col_name=name ) col_name_list.append(out_name) col_name_map[out_name][idx] = name # Check for duplicate output column names col_name_count = Counter(col_name_list) repeated_names = [name for name, count in col_name_count.items() if count > 1] if repeated_names: raise TableMergeError( "Merging column names resulted in duplicates: {}. " "Change uniq_col_name or table_names args to fix this.".format( repeated_names ) ) # Convert col_name_map to a regular dict with tuple (immutable) values col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list) return col_name_map def get_descrs(arrays, col_name_map): """ Find the dtypes descrs resulting from merging the list of arrays' dtypes, using the column name mapping ``col_name_map``. Return a list of descrs for the output. """ out_descrs = [] for out_name, in_names in col_name_map.items(): # List of input arrays that contribute to this output column in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] # List of names of the columns that contribute to this output column. names = [name for name in in_names if name is not None] # Output dtype is the superset of all dtypes in in_arrays try: dtype = common_dtype(in_cols) except TableMergeError as tme: # Beautify the error message when we are trying to merge columns with incompatible # types by including the name of the columns that originated the error. raise TableMergeError( "The '{}' columns have incompatible types: {}".format( names[0], tme._incompat_types ) ) from tme # Make sure all input shapes are the same uniq_shapes = {col.shape[1:] for col in in_cols} if len(uniq_shapes) != 1: raise TableMergeError(f"Key columns {names!r} have different shape") shape = uniq_shapes.pop() if out_name is not None: out_name = str(out_name) out_descrs.append((out_name, dtype, shape)) return out_descrs def common_dtype(cols): """ Use numpy to find the common dtype for a list of columns. Only allow columns within the following fundamental numpy data types: np.bool_, np.object_, np.number, np.character, np.void """ try: return metadata.common_dtype(cols) except metadata.MergeConflictError as err: tme = TableMergeError(f"Columns have incompatible types {err._incompat_types}") tme._incompat_types = err._incompat_types raise tme from err def _get_join_sort_idxs(keys, left, right): # Go through each of the key columns in order and make columns for # a new structured array that represents the lexical ordering of those # key columns. This structured array is then argsort'ed. The trick here # is that some columns (e.g. Time) may need to be expanded into multiple # columns for ordering here. ii = 0 # Index for uniquely naming the sort columns # sortable_table dtypes as list of (name, dtype_str, shape) tuples sort_keys_dtypes = [] sort_keys = [] # sortable_table (structured ndarray) column names sort_left = {} # sortable ndarrays from left table sort_right = {} # sortable ndarray from right table for key in keys: # get_sortable_arrays() returns a list of ndarrays that can be lexically # sorted to represent the order of the column. In most cases this is just # a single element of the column itself. 
left_sort_cols = left[key].info.get_sortable_arrays() right_sort_cols = right[key].info.get_sortable_arrays() if len(left_sort_cols) != len(right_sort_cols): # Should never happen because cols are screened beforehand for compatibility raise RuntimeError("mismatch in sort cols lengths") for left_sort_col, right_sort_col in zip(left_sort_cols, right_sort_cols): # Check for consistency of shapes. Mismatch should never happen. shape = left_sort_col.shape[1:] if shape != right_sort_col.shape[1:]: raise RuntimeError("mismatch in shape of left vs. right sort array") if shape != (): raise ValueError(f"sort key column {key!r} must be 1-d") sort_key = str(ii) sort_keys.append(sort_key) sort_left[sort_key] = left_sort_col sort_right[sort_key] = right_sort_col # Build up dtypes for the structured array that gets sorted. dtype_str = common_dtype([left_sort_col, right_sort_col]) sort_keys_dtypes.append((sort_key, dtype_str)) ii += 1 # Make the empty sortable table and fill it len_left = len(left) sortable_table = np.empty(len_left + len(right), dtype=sort_keys_dtypes) for key in sort_keys: sortable_table[key][:len_left] = sort_left[key] sortable_table[key][len_left:] = sort_right[key] # Finally do the (lexical) argsort and make a new sorted version idx_sort = sortable_table.argsort(order=sort_keys) sorted_table = sortable_table[idx_sort] # Get indexes of unique elements (i.e. the group boundaries) diffs = np.concatenate(([True], sorted_table[1:] != sorted_table[:-1], [True])) idxs = np.flatnonzero(diffs) return idxs, idx_sort def _apply_join_funcs(left, right, keys, join_funcs): """Apply join_funcs.""" # Make light copies of left and right, then add new index columns. left = left.copy(copy_data=False) right = right.copy(copy_data=False) for key, join_func in join_funcs.items(): ids1, ids2 = join_func(left[key], right[key]) # Define a unique id_key name, and keep adding underscores until we have # a name not yet present. id_key = key + "_id" while id_key in left.columns or id_key in right.columns: id_key = id_key[:-2] + "_id" keys = tuple(id_key if orig_key == key else orig_key for orig_key in keys) left.add_column(ids1, index=0, name=id_key) # [id_key] = ids1 right.add_column(ids2, index=0, name=id_key) # [id_key] = ids2 return left, right, keys def _join( left, right, keys=None, join_type="inner", uniq_col_name="{col_name}_{table_name}", table_names=["1", "2"], col_name_map=None, metadata_conflicts="warn", join_funcs=None, keys_left=None, keys_right=None, ): """ Perform a join of the left and right Tables on specified keys. Parameters ---------- left : Table Left side table in the join right : Table Right side table in the join keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns which are common to both tables. join_type : str Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner' uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2']. col_name_map : empty dict or None If passed as a dict then it will be updated in-place with the mapping of output to input column names. metadata_conflicts : str How to proceed with metadata conflicts. 
This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. join_funcs : dict, None Dict of functions to use for matching the corresponding key column(s). See `~astropy.table.join_skycoord` for an example and details. Returns ------- joined_table : `~astropy.table.Table` object New table containing the result of the join operation. """ # Store user-provided col_name_map until the end _col_name_map = col_name_map # Special column name for cartesian join, should never collide with real column cartesian_index_name = "__table_cartesian_join_temp_index__" if join_type not in ("inner", "outer", "left", "right", "cartesian"): raise ValueError( "The 'join_type' argument should be in 'inner', " "'outer', 'left', 'right', or 'cartesian' " "(got '{}' instead)".format(join_type) ) if join_type == "cartesian": if keys: raise ValueError("cannot supply keys for a cartesian join") if join_funcs: raise ValueError("cannot supply join_funcs for a cartesian join") # Make light copies of left and right, then add temporary index columns # with all the same value so later an outer join turns into a cartesian join. left = left.copy(copy_data=False) right = right.copy(copy_data=False) left[cartesian_index_name] = np.uint8(0) right[cartesian_index_name] = np.uint8(0) keys = (cartesian_index_name,) # Handle the case of join key columns that are different between left and # right via keys_left/keys_right args. This is done by saving the original # input tables and making new left and right tables that contain only the # key cols but with common column names ['0', '1', etc]. This sets `keys` to # those fake key names in the left and right tables if keys_left is not None or keys_right is not None: left_orig = left right_orig = right left, right, keys = _join_keys_left_right( left, right, keys, keys_left, keys_right, join_funcs ) if keys is None: keys = tuple(name for name in left.colnames if name in right.colnames) if len(keys) == 0: raise TableMergeError("No keys in common between left and right tables") elif isinstance(keys, str): # If we have a single key, put it in a tuple keys = (keys,) # Check the key columns for arr, arr_label in ((left, "Left"), (right, "Right")): for name in keys: if name not in arr.colnames: raise TableMergeError( f"{arr_label} table does not have key column {name!r}" ) if hasattr(arr[name], "mask") and np.any(arr[name].mask): raise TableMergeError( f"{arr_label} key column {name!r} has missing values" ) if join_funcs is not None: if not all(key in keys for key in join_funcs): raise ValueError( f"join_funcs keys {join_funcs.keys()} must be a " f"subset of join keys {keys}" ) left, right, keys = _apply_join_funcs(left, right, keys, join_funcs) len_left, len_right = len(left), len(right) if len_left == 0 or len_right == 0: raise ValueError("input tables for join must both have at least one row") try: idxs, idx_sort = _get_join_sort_idxs(keys, left, right) except NotImplementedError: raise TypeError("one or more key columns are not sortable") # Now that we have idxs and idx_sort, revert to the original table args to # carry on with making the output joined table. `keys` is set to to an empty # list so that all original left and right columns are included in the # output table. 
if keys_left is not None or keys_right is not None: keys = [] left = left_orig right = right_orig # Joined array dtype as a list of descr (name, type_str, shape) tuples col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names) out_descrs = get_descrs([left, right], col_name_map) # Main inner loop in Cython to compute the cartesian product # indices for the given join type int_join_type = {"inner": 0, "outer": 1, "left": 2, "right": 3, "cartesian": 1}[ join_type ] masked, n_out, left_out, left_mask, right_out, right_mask = _np_utils.join_inner( idxs, idx_sort, len_left, int_join_type ) out = _get_out_class([left, right])() for out_name, dtype, shape in out_descrs: if out_name == cartesian_index_name: continue left_name, right_name = col_name_map[out_name] if left_name and right_name: # this is a key which comes from left and right cols = [left[left_name], right[right_name]] col_cls = _get_out_class(cols) if not hasattr(col_cls.info, "new_like"): raise NotImplementedError( f"join unavailable for mixin column type(s): {col_cls.__name__}" ) out[out_name] = col_cls.info.new_like( cols, n_out, metadata_conflicts, out_name ) out[out_name][:] = np.where( right_mask, left[left_name].take(left_out), right[right_name].take(right_out), ) continue elif left_name: # out_name came from the left table name, array, array_out, array_mask = left_name, left, left_out, left_mask elif right_name: name, array, array_out, array_mask = ( right_name, right, right_out, right_mask, ) else: raise TableMergeError('Unexpected column names (maybe one is ""?)') # Select the correct elements from the original table col = array[name][array_out] # If the output column is masked then set the output column masking # accordingly. Check for columns that don't support a mask attribute. if masked and np.any(array_mask): # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) # array_mask is 1-d corresponding to length of output column. We need # make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..). # Mixin columns might not have ndim attribute so use len(col.shape). array_mask.shape = (col.shape[0],) + (1,) * (len(col.shape) - 1) # Now broadcast to the correct final shape array_mask = np.broadcast_to(array_mask, col.shape) try: col[array_mask] = col.info.mask_val except Exception as err: # Not clear how different classes will fail here raise NotImplementedError( "join requires masking column '{}' but column" " type {} does not support masking".format( out_name, col.__class__.__name__ ) ) from err # Set the output table column to the new joined column out[out_name] = col # If col_name_map supplied as a dict input, then update. if isinstance(_col_name_map, Mapping): _col_name_map.update(col_name_map) return out def _join_keys_left_right(left, right, keys, keys_left, keys_right, join_funcs): """Do processing to handle keys_left / keys_right args for join. This takes the keys_left/right inputs and turns them into a list of left/right columns corresponding to those inputs (which can be column names or column data values). It also generates the list of fake key column names (strings of "1", "2", etc.) that correspond to the input keys. 
""" def _keys_to_cols(keys, table, label): # Process input `keys`, which is a str or list of str column names in # `table` or a list of column-like objects. The `label` is just for # error reporting. if isinstance(keys, str): keys = [keys] cols = [] for key in keys: if isinstance(key, str): try: cols.append(table[key]) except KeyError: raise ValueError(f"{label} table does not have key column {key!r}") else: if len(key) != len(table): raise ValueError( f"{label} table has different length from key {key}" ) cols.append(key) return cols if join_funcs is not None: raise ValueError("cannot supply join_funcs arg and keys_left / keys_right") if keys_left is None or keys_right is None: raise ValueError("keys_left and keys_right must both be provided") if keys is not None: raise ValueError( "keys arg must be None if keys_left and keys_right are supplied" ) cols_left = _keys_to_cols(keys_left, left, "left") cols_right = _keys_to_cols(keys_right, right, "right") if len(cols_left) != len(cols_right): raise ValueError("keys_left and keys_right args must have same length") # Make two new temp tables for the join with only the join columns and # key columns in common. keys = [f"{ii}" for ii in range(len(cols_left))] left = left.__class__(cols_left, names=keys, copy=False) right = right.__class__(cols_right, names=keys, copy=False) return left, right, keys def _check_join_type(join_type, func_name): """Check join_type arg in hstack and vstack. This specifically checks for the common mistake of call vstack(t1, t2) instead of vstack([t1, t2]). The subsequent check of ``join_type in ('inner', ..)`` does not raise in this case. """ if not isinstance(join_type, str): msg = "`join_type` arg must be a string" if isinstance(join_type, Table): msg += ( ". Did you accidentally " f"call {func_name}(t1, t2, ..) instead of " f"{func_name}([t1, t2], ..)?" ) raise TypeError(msg) if join_type not in ("inner", "exact", "outer"): raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'") def _vstack(arrays, join_type="outer", col_name_map=None, metadata_conflicts="warn"): """ Stack Tables vertically (by rows). A ``join_type`` of 'exact' (default) means that the arrays must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' means the output will have the union of all columns, with array values being masked where no common values are available. Parameters ---------- arrays : list of Tables Tables to stack by rows (vertically) join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' col_name_map : empty dict or None If passed as a dict then it will be updated in-place with the mapping of output to input column names. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. 
""" # Store user-provided col_name_map until the end _col_name_map = col_name_map # Trivial case of one input array if len(arrays) == 1: return arrays[0] # Start by assuming an outer match where all names go to output names = set(itertools.chain(*[arr.colnames for arr in arrays])) col_name_map = get_col_name_map(arrays, names) # If require_match is True then the output must have exactly the same # number of columns as each input array if join_type == "exact": for names in col_name_map.values(): if any(x is None for x in names): raise TableMergeError( "Inconsistent columns in input arrays " "(use 'inner' or 'outer' join_type to " "allow non-matching columns)" ) join_type = "outer" # For an inner join, keep only columns where all input arrays have that column if join_type == "inner": col_name_map = OrderedDict( (name, in_names) for name, in_names in col_name_map.items() if all(x is not None for x in in_names) ) if len(col_name_map) == 0: raise TableMergeError("Input arrays have no columns in common") lens = [len(arr) for arr in arrays] n_rows = sum(lens) out = _get_out_class(arrays)() for out_name, in_names in col_name_map.items(): # List of input arrays that contribute to this output column cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] col_cls = _get_out_class(cols) if not hasattr(col_cls.info, "new_like"): raise NotImplementedError( f"vstack unavailable for mixin column type(s): {col_cls.__name__}" ) try: col = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name) except metadata.MergeConflictError as err: # Beautify the error message when we are trying to merge columns with incompatible # types by including the name of the columns that originated the error. raise TableMergeError( "The '{}' columns have incompatible types: {}".format( out_name, err._incompat_types ) ) from err idx0 = 0 for name, array in zip(in_names, arrays): idx1 = idx0 + len(array) if name in array.colnames: col[idx0:idx1] = array[name] else: # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) try: col[idx0:idx1] = col.info.mask_val except Exception as err: raise NotImplementedError( "vstack requires masking column '{}' but column" " type {} does not support masking".format( out_name, col.__class__.__name__ ) ) from err idx0 = idx1 out[out_name] = col # If col_name_map supplied as a dict input, then update. if isinstance(_col_name_map, Mapping): _col_name_map.update(col_name_map) return out def _hstack( arrays, join_type="outer", uniq_col_name="{col_name}_{table_name}", table_names=None, col_name_map=None, ): """ Stack tables horizontally (by columns). A ``join_type`` of 'exact' (default) means that the arrays must all have exactly the same number of rows. If ``join_type`` is 'inner' then the intersection of rows will be the output. A value of 'outer' means the output will have the union of all rows, with array values being masked where no common values are available. Parameters ---------- arrays : List of tables Tables to stack by columns (horizontally) join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' uniq_col_name : str or None String generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. 
table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2', ..]. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. """ # Store user-provided col_name_map until the end _col_name_map = col_name_map if table_names is None: table_names = [f"{ii + 1}" for ii in range(len(arrays))] if len(arrays) != len(table_names): raise ValueError("Number of arrays must match number of table_names") # Trivial case of one input arrays if len(arrays) == 1: return arrays[0] col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names) # If require_match is True then all input arrays must have the same length arr_lens = [len(arr) for arr in arrays] if join_type == "exact": if len(set(arr_lens)) > 1: raise TableMergeError( "Inconsistent number of rows in input arrays " "(use 'inner' or 'outer' join_type to allow " "non-matching rows)" ) join_type = "outer" # For an inner join, keep only the common rows if join_type == "inner": min_arr_len = min(arr_lens) if len(set(arr_lens)) > 1: arrays = [arr[:min_arr_len] for arr in arrays] arr_lens = [min_arr_len for arr in arrays] # If there are any output rows where one or more input arrays are missing # then the output must be masked. If any input arrays are masked then # output is masked. n_rows = max(arr_lens) out = _get_out_class(arrays)() for out_name, in_names in col_name_map.items(): for name, array, arr_len in zip(in_names, arrays, arr_lens): if name is None: continue if n_rows > arr_len: indices = np.arange(n_rows) indices[arr_len:] = 0 col = array[name][indices] # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) try: col[arr_len:] = col.info.mask_val except Exception as err: raise NotImplementedError( "hstack requires masking column '{}' but column" " type {} does not support masking".format( out_name, col.__class__.__name__ ) ) from err else: col = array[name][:n_rows] out[out_name] = col # If col_name_map supplied as a dict input, then update. if isinstance(_col_name_map, Mapping): _col_name_map.update(col_name_map) return out
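# ----------------------------------------------------------------------------
# Illustrative sketch only: a minimal usage example of the ``keys_left`` /
# ``keys_right`` path handled by join() and _join_keys_left_right() above,
# which matches rows on key columns that have different names in the two
# tables.  The table and column names below (``obs``, ``cat``, ``obs_id``,
# ``src``) are invented for the demonstration and are not part of the module.
if __name__ == "__main__":
    from astropy.table import Table, join

    obs = Table({"obs_id": [1, 2, 3], "flux": [10.0, 20.0, 30.0]})
    cat = Table({"src": [2, 3, 4], "name": ["a", "b", "c"]})

    # Inner join matching obs['obs_id'] against cat['src'].  Because the key
    # columns are passed via keys_left/keys_right, both original key columns
    # are kept in the output alongside 'flux' and 'name'.
    out = join(obs, cat, join_type="inner", keys_left="obs_id", keys_right="src")
    print(out)
    # The result should contain the two rows where obs_id equals src (2 and 3).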
30929d6e5451165417488bc3832240f2df197ec413907084d1c07723a9a955f5
import copy import json import textwrap from collections import OrderedDict import numpy as np import yaml __all__ = ["get_header_from_yaml", "get_yaml_from_header", "get_yaml_from_table"] class ColumnOrderList(list): """ List of tuples that sorts in a specific order that makes sense for astropy table column attributes. """ def sort(self, *args, **kwargs): super().sort() column_keys = ["name", "unit", "datatype", "format", "description", "meta"] in_dict = dict(self) out_list = [] for key in column_keys: if key in in_dict: out_list.append((key, in_dict[key])) for key, val in self: if key not in column_keys: out_list.append((key, val)) # Clear list in-place del self[:] self.extend(out_list) class ColumnDict(dict): """ Specialized dict subclass to represent attributes of a Column and return items() in a preferred order. This is only for use in generating a YAML map representation that has a fixed order. """ def items(self): """ Return items as a ColumnOrderList, which sorts in the preferred way for column attributes. """ return ColumnOrderList(super().items()) def _construct_odict(load, node): """ Construct OrderedDict from !!omap in yaml safe load. Source: https://gist.github.com/weaver/317164 License: Unspecified This is the same as SafeConstructor.construct_yaml_omap(), except the data type is changed to OrderedDict() and setitem is used instead of append in the loop Examples -------- :: >>> yaml.load(''' # doctest: +SKIP ... !!omap ... - foo: bar ... - mumble: quux ... - baz: gorp ... ''') OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) >>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''') # doctest: +SKIP OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) """ omap = OrderedDict() yield omap if not isinstance(node, yaml.SequenceNode): raise yaml.constructor.ConstructorError( "while constructing an ordered map", node.start_mark, f"expected a sequence, but found {node.id}", node.start_mark, ) for subnode in node.value: if not isinstance(subnode, yaml.MappingNode): raise yaml.constructor.ConstructorError( "while constructing an ordered map", node.start_mark, f"expected a mapping of length 1, but found {subnode.id}", subnode.start_mark, ) if len(subnode.value) != 1: raise yaml.constructor.ConstructorError( "while constructing an ordered map", node.start_mark, f"expected a single mapping item, but found {len(subnode.value)} items", subnode.start_mark, ) key_node, value_node = subnode.value[0] key = load.construct_object(key_node) value = load.construct_object(value_node) omap[key] = value def _repr_pairs(dump, tag, sequence, flow_style=None): """ This is the same code as BaseRepresenter.represent_sequence(), but the value passed to dump.represent_data() in the loop is a dictionary instead of a tuple. Source: https://gist.github.com/weaver/317164 License: Unspecified """ value = [] node = yaml.SequenceNode(tag, value, flow_style=flow_style) if dump.alias_key is not None: dump.represented_objects[dump.alias_key] = node best_style = True for key, val in sequence: item = dump.represent_data({key: val}) if not (isinstance(item, yaml.ScalarNode) and not item.style): best_style = False value.append(item) if flow_style is None: if dump.default_flow_style is not None: node.flow_style = dump.default_flow_style else: node.flow_style = best_style return node def _repr_odict(dumper, data): """ Represent OrderedDict in yaml dump. 
Source: https://gist.github.com/weaver/317164 License: Unspecified >>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')]) >>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP '!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n' >>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP '!!omap [foo: bar, mumble: quux, baz: gorp]\\n' """ return _repr_pairs(dumper, "tag:yaml.org,2002:omap", data.items()) def _repr_column_dict(dumper, data): """ Represent ColumnDict in yaml dump. This is the same as an ordinary mapping except that the keys are written in a fixed order that makes sense for astropy table columns. """ return dumper.represent_mapping("tag:yaml.org,2002:map", data) def _get_variable_length_array_shape(col): """Check if object-type ``col`` is really a variable length list. That is true if the object consists purely of list of nested lists, where the shape of every item can be represented as (m, n, ..., *) where the (m, n, ...) are constant and only the lists in the last axis have variable shape. If so the returned value of shape will be a tuple in the form (m, n, ..., None). If ``col`` is a variable length array then the return ``dtype`` corresponds to the type found by numpy for all the individual values. Otherwise it will be ``np.dtype(object)``. Parameters ---------- col : column-like Input table column, assumed to be object-type Returns ------- shape : tuple Inferred variable length shape or None dtype : np.dtype Numpy dtype that applies to col """ class ConvertError(ValueError): """Local conversion error used below.""" # Numpy types supported as variable-length arrays np_classes = (np.floating, np.integer, np.bool_, np.unicode_) try: if len(col) == 0 or not all(isinstance(val, np.ndarray) for val in col): raise ConvertError dtype = col[0].dtype shape = col[0].shape[:-1] for val in col: if not issubclass(val.dtype.type, np_classes) or val.shape[:-1] != shape: raise ConvertError dtype = np.promote_types(dtype, val.dtype) shape = shape + (None,) except ConvertError: # `col` is not a variable length array, return shape and dtype to # the original. Note that this function is only called if # col.shape[1:] was () and col.info.dtype is object. dtype = col.info.dtype shape = () return shape, dtype def _get_datatype_from_dtype(dtype): """Return string version of ``dtype`` for writing to ECSV ``datatype``.""" datatype = dtype.name if datatype.startswith(("bytes", "str")): datatype = "string" if datatype.endswith("_"): datatype = datatype[:-1] # string_ and bool_ lose the final _ for ECSV return datatype def _get_col_attributes(col): """ Extract information from a column (apart from the values) that is required to fully serialize the column. 
Parameters ---------- col : column-like Input Table column Returns ------- attrs : dict Dict of ECSV attributes for ``col`` """ dtype = col.info.dtype # Type of column values that get written subtype = None # Type of data for object columns serialized with JSON shape = col.shape[1:] # Shape of multidim / variable length columns if dtype.name == "object": if shape == (): # 1-d object type column might be a variable length array dtype = np.dtype(str) shape, subtype = _get_variable_length_array_shape(col) else: # N-d object column is subtype object but serialized as JSON string dtype = np.dtype(str) subtype = np.dtype(object) elif shape: # N-d column which is not object is serialized as JSON string dtype = np.dtype(str) subtype = col.info.dtype datatype = _get_datatype_from_dtype(dtype) # Set the output attributes attrs = ColumnDict() attrs["name"] = col.info.name attrs["datatype"] = datatype for attr, nontrivial, xform in ( ("unit", lambda x: x is not None, str), ("format", lambda x: x is not None, None), ("description", lambda x: x is not None, None), ("meta", lambda x: x, None), ): col_attr = getattr(col.info, attr) if nontrivial(col_attr): attrs[attr] = xform(col_attr) if xform else col_attr if subtype: attrs["subtype"] = _get_datatype_from_dtype(subtype) # Numpy 'object' maps to 'subtype' of 'json' in ECSV if attrs["subtype"] == "object": attrs["subtype"] = "json" if shape: attrs["subtype"] += json.dumps(list(shape), separators=(",", ":")) return attrs def get_yaml_from_table(table): """ Return lines with a YAML representation of header content from the ``table``. Parameters ---------- table : `~astropy.table.Table` object Table for which header content is output Returns ------- lines : list List of text lines with YAML header content """ header = {"cols": list(table.columns.values())} if table.meta: header["meta"] = table.meta return get_yaml_from_header(header) def get_yaml_from_header(header): """ Return lines with a YAML representation of header content from a Table. The ``header`` dict must contain these keys: - 'cols' : list of table column objects (required) - 'meta' : table 'meta' attribute (optional) Other keys included in ``header`` will be serialized in the output YAML representation. Parameters ---------- header : dict Table header content Returns ------- lines : list List of text lines with YAML header content """ from astropy.io.misc.yaml import AstropyDumper class TableDumper(AstropyDumper): """ Custom Dumper that represents OrderedDict as an !!omap object. """ def represent_mapping(self, tag, mapping, flow_style=None): """ This is a combination of the Python 2 and 3 versions of this method in the PyYAML library to allow the required key ordering via the ColumnOrderList object. The Python 3 version insists on turning the items() mapping into a list object and sorting, which results in alphabetical order for the column keys. 
""" value = [] node = yaml.MappingNode(tag, value, flow_style=flow_style) if self.alias_key is not None: self.represented_objects[self.alias_key] = node best_style = True if hasattr(mapping, "items"): mapping = mapping.items() if hasattr(mapping, "sort"): mapping.sort() else: mapping = list(mapping) try: mapping = sorted(mapping) except TypeError: pass for item_key, item_value in mapping: node_key = self.represent_data(item_key) node_value = self.represent_data(item_value) if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style): best_style = False if not ( isinstance(node_value, yaml.ScalarNode) and not node_value.style ): best_style = False value.append((node_key, node_value)) if flow_style is None: if self.default_flow_style is not None: node.flow_style = self.default_flow_style else: node.flow_style = best_style return node TableDumper.add_representer(OrderedDict, _repr_odict) TableDumper.add_representer(ColumnDict, _repr_column_dict) header = copy.copy(header) # Don't overwrite original header["datatype"] = [_get_col_attributes(col) for col in header["cols"]] del header["cols"] lines = yaml.dump( header, default_flow_style=None, Dumper=TableDumper, width=130 ).splitlines() return lines class YamlParseError(Exception): pass def get_header_from_yaml(lines): """ Get a header dict from input ``lines`` which should be valid YAML. This input will typically be created by get_yaml_from_header. The output is a dictionary which describes all the table and column meta. The get_cols() method in the io/ascii/ecsv.py file should be used as a guide to using the information when constructing a table using this header dict information. Parameters ---------- lines : list List of text lines with YAML header content Returns ------- header : dict Dictionary describing table and column meta """ from astropy.io.misc.yaml import AstropyLoader class TableLoader(AstropyLoader): """ Custom Loader that constructs OrderedDict from an !!omap object. This does nothing but provide a namespace for adding the custom odict constructor. """ TableLoader.add_constructor("tag:yaml.org,2002:omap", _construct_odict) # Now actually load the YAML data structure into `meta` header_yaml = textwrap.dedent("\n".join(lines)) try: header = yaml.load(header_yaml, Loader=TableLoader) except Exception as err: raise YamlParseError() from err return header
2112c4f068a977666cdfddac9748013ded5d96561bc75081b4d5838204248c93
# Licensed under a 3-clause BSD style license - see LICENSE.rst import math import numpy as np from astropy.modeling import models from astropy.modeling.core import Fittable1DModel, Fittable2DModel from .core import Kernel, Kernel1D, Kernel2D from .utils import has_even_axis, raise_even_kernel_exception __all__ = [ "Gaussian1DKernel", "Gaussian2DKernel", "CustomKernel", "Box1DKernel", "Box2DKernel", "Tophat2DKernel", "Trapezoid1DKernel", "RickerWavelet1DKernel", "RickerWavelet2DKernel", "AiryDisk2DKernel", "Moffat2DKernel", "Model1DKernel", "Model2DKernel", "TrapezoidDisk2DKernel", "Ring2DKernel", ] def _round_up_to_odd_integer(value): i = math.ceil(value) if i % 2 == 0: return i + 1 else: return i class Gaussian1DKernel(Kernel1D): """ 1D Gaussian filter kernel. The Gaussian filter is a filter with great smoothing properties. It is isotropic and does not produce artifacts. The generated kernel is normalized so that it integrates to 1. Parameters ---------- stddev : number Standard deviation of the Gaussian kernel. x_size : int, optional Size of the kernel array. Default = ⌊8*stddev+1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. Very slow. factor : number, optional Factor of oversampling. Default factor = 10. If the factor is too large, evaluation can be very slow. See Also -------- Box1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Gaussian1DKernel gauss_1D_kernel = Gaussian1DKernel(10) plt.plot(gauss_1D_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('value') plt.show() """ _separable = True _is_bool = False def __init__(self, stddev, **kwargs): self._model = models.Gaussian1D(1.0 / (np.sqrt(2 * np.pi) * stddev), 0, stddev) self._default_size = _round_up_to_odd_integer(8 * stddev) super().__init__(**kwargs) self.normalize() class Gaussian2DKernel(Kernel2D): """ 2D Gaussian filter kernel. The Gaussian filter is a filter with great smoothing properties. It is isotropic and does not produce artifacts. The generated kernel is normalized so that it integrates to 1. Parameters ---------- x_stddev : float Standard deviation of the Gaussian in x before rotating by theta. y_stddev : float Standard deviation of the Gaussian in y before rotating by theta. theta : float or `~astropy.units.Quantity` ['angle'] Rotation angle. If passed as a float, it is assumed to be in radians. The rotation angle increases counterclockwise. x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*stddev + 1⌋. y_size : int, optional Size in y direction of the kernel array. Default = ⌊8*stddev + 1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. 
* 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Gaussian2DKernel gaussian_2D_kernel = Gaussian2DKernel(10) plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _separable = True _is_bool = False def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs): if y_stddev is None: y_stddev = x_stddev self._model = models.Gaussian2D( amplitude=1.0 / (2 * np.pi * x_stddev * y_stddev), x_mean=0, y_mean=0, x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, ) self._default_size = _round_up_to_odd_integer(8 * np.max([x_stddev, y_stddev])) super().__init__(**kwargs) self.normalize() class Box1DKernel(Kernel1D): """ 1D Box filter kernel. The Box filter or running mean is a smoothing filter. It is not isotropic and can produce artifacts when applied repeatedly to the same data. The generated kernel is normalized so that it integrates to 1. By default the Box kernel uses the ``linear_interp`` discretization mode, which allows non-shifting, even-sized kernels. This is achieved by weighting the edge pixels with 1/2. E.g. a Box kernel with an effective smoothing of 4 pixels would have the following array: [0.5, 1, 1, 1, 0.5]. Parameters ---------- width : number Width of the filter kernel. mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'linear_interp' (default) Discretize model by linearly interpolating between the values at the corners of the bin. * 'center' Discretize model by taking the value at the center of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian1DKernel, Trapezoid1DKernel, RickerWavelet1DKernel Examples -------- Kernel response function: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Box1DKernel box_1D_kernel = Box1DKernel(9) plt.plot(box_1D_kernel, drawstyle='steps') plt.xlim(-1, 9) plt.xlabel('x [pixels]') plt.ylabel('value') plt.show() """ _separable = True _is_bool = True def __init__(self, width, **kwargs): self._model = models.Box1D(1.0 / width, 0, width) self._default_size = _round_up_to_odd_integer(width) kwargs["mode"] = "linear_interp" super().__init__(**kwargs) self.normalize() class Box2DKernel(Kernel2D): """ 2D Box filter kernel. The Box filter or running mean is a smoothing filter. It is not isotropic and can produce artifacts when applied repeatedly to the same data. The generated kernel is normalized so that it integrates to 1. By default the Box kernel uses the ``linear_interp`` discretization mode, which allows non-shifting, even-sized kernels. This is achieved by weighting the edge pixels with 1/2. Parameters ---------- width : number Width of the filter kernel. mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'linear_interp' (default) Discretize model by performing a bilinear interpolation between the values at the corners of the bin. 
* 'center' Discretize model by taking the value at the center of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Box2DKernel box_2D_kernel = Box2DKernel(9) plt.imshow(box_2D_kernel, interpolation='none', origin='lower', vmin=0.0, vmax=0.015) plt.xlim(-1, 9) plt.ylim(-1, 9) plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _separable = True _is_bool = True def __init__(self, width, **kwargs): self._model = models.Box2D(1.0 / width**2, 0, 0, width, width) self._default_size = _round_up_to_odd_integer(width) kwargs["mode"] = "linear_interp" super().__init__(**kwargs) self.normalize() class Tophat2DKernel(Kernel2D): """ 2D Tophat filter kernel. The Tophat filter is an isotropic smoothing filter. It can produce artifacts when applied repeatedly on the same data. The generated kernel is normalized so that it integrates to 1. Parameters ---------- radius : int Radius of the filter kernel. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Tophat2DKernel tophat_2D_kernel = Tophat2DKernel(40) plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ def __init__(self, radius, **kwargs): self._model = models.Disk2D(1.0 / (np.pi * radius**2), 0, 0, radius) self._default_size = _round_up_to_odd_integer(2 * radius) super().__init__(**kwargs) self.normalize() class Ring2DKernel(Kernel2D): """ 2D Ring filter kernel. The Ring filter kernel is the difference between two Tophat kernels of different width. This kernel is useful for, e.g., background estimation. The generated kernel is normalized so that it integrates to 1. Parameters ---------- radius_in : number Inner radius of the ring kernel. width : number Width of the ring kernel. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. 
See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Ring2DKernel ring_2D_kernel = Ring2DKernel(9, 8) plt.imshow(ring_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ def __init__(self, radius_in, width, **kwargs): radius_out = radius_in + width self._model = models.Ring2D( 1.0 / (np.pi * (radius_out**2 - radius_in**2)), 0, 0, radius_in, width ) self._default_size = _round_up_to_odd_integer(2 * radius_out) super().__init__(**kwargs) self.normalize() class Trapezoid1DKernel(Kernel1D): """ 1D trapezoid kernel. The generated kernel is normalized so that it integrates to 1. Parameters ---------- width : number Width of the filter kernel, defined as the width of the constant part, before it begins to slope down. slope : number Slope of the filter kernel's tails mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Box1DKernel, Gaussian1DKernel, RickerWavelet1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Trapezoid1DKernel trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2) plt.plot(trapezoid_1D_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('amplitude') plt.xlim(-1, 28) plt.show() """ _is_bool = False def __init__(self, width, slope=1.0, **kwargs): self._model = models.Trapezoid1D(1, 0, width, slope) self._default_size = _round_up_to_odd_integer(width + 2.0 / slope) super().__init__(**kwargs) self.normalize() class TrapezoidDisk2DKernel(Kernel2D): """ 2D trapezoid kernel. The generated kernel is normalized so that it integrates to 1. Parameters ---------- radius : number Width of the filter kernel, defined as the width of the constant part, before it begins to slope down. slope : number Slope of the filter kernel's tails mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. 
plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import TrapezoidDisk2DKernel trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2) plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, radius, slope=1.0, **kwargs): self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope) self._default_size = _round_up_to_odd_integer(2 * radius + 2.0 / slope) super().__init__(**kwargs) self.normalize() class RickerWavelet1DKernel(Kernel1D): """ 1D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat" kernel). The Ricker wavelet, or inverted Gaussian-Laplace filter, is a bandpass filter. It smooths the data and removes slowly varying or constant structures (e.g. Background). It is useful for peak or multi-scale detection. This kernel is derived from a normalized Gaussian function, by computing the second derivative. This results in an amplitude at the kernel's center of 1. / (sqrt(2 * pi) * width ** 3). The normalization is the same as for `scipy.ndimage.gaussian_laplace`, except for a minus sign. .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this kernel. Parameters ---------- width : number Width of the filter kernel, defined as the standard deviation of the Gaussian function from which it is derived. x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*width +1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import RickerWavelet1DKernel ricker_1d_kernel = RickerWavelet1DKernel(10) plt.plot(ricker_1d_kernel, drawstyle='steps') plt.xlabel('x [pixels]') plt.ylabel('value') plt.show() """ _is_bool = True def __init__(self, width, **kwargs): amplitude = 1.0 / (np.sqrt(2 * np.pi) * width**3) self._model = models.RickerWavelet1D(amplitude, 0, width) self._default_size = _round_up_to_odd_integer(8 * width) super().__init__(**kwargs) class RickerWavelet2DKernel(Kernel2D): """ 2D Ricker wavelet filter kernel (sometimes known as a "Mexican Hat" kernel). The Ricker wavelet, or inverted Gaussian-Laplace filter, is a bandpass filter. It smooths the data and removes slowly varying or constant structures (e.g. Background). It is useful for peak or multi-scale detection. This kernel is derived from a normalized Gaussian function, by computing the second derivative. This results in an amplitude at the kernel's center of 1. / (pi * width ** 4). The normalization is the same as for `scipy.ndimage.gaussian_laplace`, except for a minus sign. .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this kernel. Parameters ---------- width : number Width of the filter kernel, defined as the standard deviation of the Gaussian function from which it is derived. 
x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*width +1⌋. y_size : int, optional Size in y direction of the kernel array. Default = ⌊8*width +1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import RickerWavelet2DKernel ricker_2d_kernel = RickerWavelet2DKernel(10) plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, width, **kwargs): amplitude = 1.0 / (np.pi * width**4) self._model = models.RickerWavelet2D(amplitude, 0, 0, width) self._default_size = _round_up_to_odd_integer(8 * width) super().__init__(**kwargs) class AiryDisk2DKernel(Kernel2D): """ 2D Airy disk kernel. This kernel models the diffraction pattern of a circular aperture. The generated kernel is normalized so that it integrates to 1. Parameters ---------- radius : float The radius of the Airy disk kernel (radius of the first zero). x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*radius + 1⌋. y_size : int, optional Size in y direction of the kernel array. Default = ⌊8*radius + 1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, Moffat2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import AiryDisk2DKernel airydisk_2D_kernel = AiryDisk2DKernel(10) plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, radius, **kwargs): self._model = models.AiryDisk2D(1, 0, 0, radius) self._default_size = _round_up_to_odd_integer(8 * radius) super().__init__(**kwargs) self.normalize() class Moffat2DKernel(Kernel2D): """ 2D Moffat kernel. This kernel is a typical model for a seeing limited PSF. The generated kernel is normalized so that it integrates to 1. Parameters ---------- gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*radius + 1⌋. y_size : int, optional Size in y direction of the kernel array. 
Default = ⌊8*radius + 1⌋. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. See Also -------- Gaussian2DKernel, Box2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel Examples -------- Kernel response: .. plot:: :include-source: import matplotlib.pyplot as plt from astropy.convolution import Moffat2DKernel moffat_2D_kernel = Moffat2DKernel(3, 2) plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower') plt.xlabel('x [pixels]') plt.ylabel('y [pixels]') plt.colorbar() plt.show() """ _is_bool = False def __init__(self, gamma, alpha, **kwargs): # Compute amplitude, from # https://en.wikipedia.org/wiki/Moffat_distribution amplitude = (alpha - 1.0) / (np.pi * gamma * gamma) self._model = models.Moffat2D(amplitude, 0, 0, gamma, alpha) self._default_size = _round_up_to_odd_integer(4.0 * self._model.fwhm) super().__init__(**kwargs) self.normalize() class Model1DKernel(Kernel1D): """ Create kernel from 1D model. The model has to be centered on x = 0. Parameters ---------- model : `~astropy.modeling.Fittable1DModel` Kernel response function model x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*width +1⌋. Must be odd. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by linearly interpolating between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. Raises ------ TypeError If model is not an instance of `~astropy.modeling.Fittable1DModel` See Also -------- Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel` CustomKernel : Create kernel from list or array Examples -------- Define a Gaussian1D model: >>> from astropy.modeling.models import Gaussian1D >>> from astropy.convolution.kernels import Model1DKernel >>> gauss = Gaussian1D(1, 0, 2) And create a custom one dimensional kernel from it: >>> gauss_kernel = Model1DKernel(gauss, x_size=9) This kernel can now be used like a usual Astropy kernel. """ _separable = False _is_bool = False def __init__(self, model, **kwargs): if isinstance(model, Fittable1DModel): self._model = model else: raise TypeError("Must be Fittable1DModel") super().__init__(**kwargs) class Model2DKernel(Kernel2D): """ Create kernel from 2D model. The model has to be centered on x = 0 and y = 0. Parameters ---------- model : `~astropy.modeling.Fittable2DModel` Kernel response function model x_size : int, optional Size in x direction of the kernel array. Default = ⌊8*width +1⌋. Must be odd. y_size : int, optional Size in y direction of the kernel array. Default = ⌊8*width +1⌋. 
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following discretization modes: * 'center' (default) Discretize model by taking the value at the center of the bin. * 'linear_interp' Discretize model by performing a bilinear interpolation between the values at the corners of the bin. * 'oversample' Discretize model by taking the average on an oversampled grid. * 'integrate' Discretize model by integrating the model over the bin. factor : number, optional Factor of oversampling. Default factor = 10. Raises ------ TypeError If model is not an instance of `~astropy.modeling.Fittable2DModel` See Also -------- Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel` CustomKernel : Create kernel from list or array Examples -------- Define a Gaussian2D model: >>> from astropy.modeling.models import Gaussian2D >>> from astropy.convolution.kernels import Model2DKernel >>> gauss = Gaussian2D(1, 0, 0, 2, 2) And create a custom two dimensional kernel from it: >>> gauss_kernel = Model2DKernel(gauss, x_size=9) This kernel can now be used like a usual astropy kernel. """ _is_bool = False _separable = False def __init__(self, model, **kwargs): self._separable = False if isinstance(model, Fittable2DModel): self._model = model else: raise TypeError("Must be Fittable2DModel") super().__init__(**kwargs) class CustomKernel(Kernel): """ Create filter kernel from list or array. Parameters ---------- array : list or array Filter kernel array. Size must be odd. Raises ------ TypeError If array is not a list or array. `~astropy.convolution.KernelSizeError` If array size is even. See Also -------- Model2DKernel, Model1DKernel Examples -------- Define one dimensional array: >>> from astropy.convolution.kernels import CustomKernel >>> import numpy as np >>> array = np.array([1, 2, 3, 2, 1]) >>> kernel = CustomKernel(array) >>> kernel.dimension 1 Define two dimensional array: >>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]) >>> kernel = CustomKernel(array) >>> kernel.dimension 2 """ def __init__(self, array): self.array = array super().__init__(self._array) @property def array(self): """ Filter kernel array. """ return self._array @array.setter def array(self, array): """ Filter kernel array setter. """ if isinstance(array, np.ndarray): self._array = array.astype(np.float64) elif isinstance(array, list): self._array = np.array(array, dtype=np.float64) else: raise TypeError("Must be list or array.") # Check if array is odd in all axes if has_even_axis(self): raise_even_kernel_exception() # Check if array is bool ones = self._array == 1.0 zeros = self._array == 0 self._is_bool = bool(np.all(np.logical_or(ones, zeros)))
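# --------------------------------------------------------------------------
# Illustrative usage sketch (commented out so it is not executed on import;
# not part of the library). It shows the common pattern for the kernel
# classes defined above: build a kernel, note that it is normalized to unit
# integral, and smooth an image with ``astropy.convolution.convolve``.
#
#     import numpy as np
#     from astropy.convolution import Gaussian2DKernel, convolve
#
#     kernel = Gaussian2DKernel(x_stddev=2)   # 17x17 array for stddev = 2 pixels
#     print(kernel.array.sum())               # ~1.0 (kernels normalize to unit sum)
#
#     image = np.random.random((64, 64))
#     smoothed = convolve(image, kernel, boundary="extend")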
68398ae98226a3ef96de377e1ab458df9ac68f18d86c3953253956358672cf1c
# Licensed under a 3-clause BSD style license - see LICENSE.rst import warnings from functools import partial import numpy as np from astropy import units as u from astropy.modeling.convolution import Convolution from astropy.modeling.core import SPECIAL_OPERATORS, CompoundModel from astropy.nddata import support_nddata from astropy.utils.console import human_file_size from astropy.utils.exceptions import AstropyUserWarning from ._convolve import _convolveNd_c from .core import MAX_NORMALIZATION, Kernel, Kernel1D, Kernel2D from .utils import KernelSizeError, has_even_axis, raise_even_kernel_exception # np.unique([scipy.fft.next_fast_len(i, real=True) for i in range(10000)]) # fmt: off _good_sizes = np.array( [ 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125, 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536, 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840, 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000, 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000, ] ) # fmt: on _good_range = int(np.log10(_good_sizes[-1])) # Disabling doctests when scipy isn't present. __doctest_requires__ = {("convolve_fft",): ["scipy.fft"]} BOUNDARY_OPTIONS = [None, "fill", "wrap", "extend"] def _next_fast_lengths(shape): """ Find optimal or good sizes to pad an array of ``shape`` to for better performance with `numpy.fft.*fft` and `scipy.fft.*fft`. Calculated directly with `scipy.fft.next_fast_len`, if available; otherwise looked up from list and scaled by powers of 10, if necessary. """ try: import scipy.fft return np.array([scipy.fft.next_fast_len(j) for j in shape]) except ImportError: pass newshape = np.empty(len(np.atleast_1d(shape)), dtype=int) for i, j in enumerate(shape): scale = 10 ** max(int(np.ceil(np.log10(j))) - _good_range, 0) for n in _good_sizes: if n * scale >= j: newshape[i] = n * scale break else: raise ValueError( f"No next fast length for {j} found in list of _good_sizes " f"<= {_good_sizes[-1] * scale}." ) return newshape def _copy_input_if_needed( input, dtype=float, order="C", nan_treatment=None, mask=None, fill_value=None ): # Alias input input = input.array if isinstance(input, Kernel) else input # strip quantity attributes if hasattr(input, "unit"): input = input.value output = input # Copy input try: # Anything that's masked must be turned into NaNs for the interpolation. # This requires copying. A copy is also needed for nan_treatment == 'fill' # A copy prevents possible function side-effects of the input array. if nan_treatment == "fill" or np.ma.is_masked(input) or mask is not None: if np.ma.is_masked(input): # ``np.ma.maskedarray.filled()`` returns a copy, however there # is no way to specify the return type or order etc. In addition # ``np.nan`` is a ``float`` and there is no conversion to an # ``int`` type. Therefore, a pre-fill copy is needed for non # ``float`` masked arrays. 
``subok=True`` is needed to retain # ``np.ma.maskedarray.filled()``. ``copy=False`` allows the fill # to act as the copy if type and order are already correct. output = np.array( input, dtype=dtype, copy=False, order=order, subok=True ) output = output.filled(fill_value) else: # Since we're making a copy, we might as well use `subok=False` to save, # what is probably, a negligible amount of memory. output = np.array( input, dtype=dtype, copy=True, order=order, subok=False ) if mask is not None: # mask != 0 yields a bool mask for all ints/floats/bool output[mask != 0] = fill_value else: # The call below is synonymous with np.asanyarray(array, ftype=float, order='C') # The advantage of `subok=True` is that it won't copy when array is an ndarray subclass. # If it is and `subok=False` (default), then it will copy even if `copy=False`. This # uses less memory when ndarray subclasses are passed in. output = np.array(input, dtype=dtype, copy=False, order=order, subok=True) except (TypeError, ValueError) as e: raise TypeError( "input should be a Numpy array or something convertible into a float array", e, ) return output @support_nddata(data="array") def convolve( array, kernel, boundary="fill", fill_value=0.0, nan_treatment="interpolate", normalize_kernel=True, mask=None, preserve_nan=False, normalization_zero_tol=1e-8, ): """ Convolve an array with a kernel. This routine differs from `scipy.ndimage.convolve` because it includes a special treatment for ``NaN`` values. Rather than including ``NaN`` values in the array in the convolution calculation, which causes large ``NaN`` holes in the convolved array, ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Parameters ---------- array : `~astropy.nddata.NDData` or array-like The array to convolve. This should be a 1, 2, or 3-dimensional array or a list or a set of nested lists representing a 1, 2, or 3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of the `~astropy.nddata.NDData` will be used as the ``mask`` argument. kernel : `numpy.ndarray` or `~astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array, and the dimensions should be odd in all directions. If a masked array, the masked values will be replaced by ``fill_value``. boundary : str, optional A flag indicating how to handle boundaries: * `None` Set the ``result`` values to zero where the kernel extends beyond the edge of the array. * 'fill' Set values outside the array boundary to ``fill_value`` (default). * 'wrap' Periodic boundary that wrap to the other side of ``array``. * 'extend' Set values outside the array to the nearest ``array`` value. fill_value : float, optional The value to use outside the array when using ``boundary='fill'``. normalize_kernel : bool, optional Whether to normalize the kernel to have a sum of one. nan_treatment : {'interpolate', 'fill'}, optional The method used to handle NaNs in the input ``array``: * ``'interpolate'``: ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Note that if the kernel has a sum equal to zero, NaN interpolation is not possible and will raise an exception. * ``'fill'``: ``NaN`` values are replaced by ``fill_value`` prior to convolution. preserve_nan : bool, optional After performing convolution, should pixels that were originally NaN again become NaN? mask : None or ndarray, optional A "mask" array. 
Shape must match ``array``, and anything that is masked (i.e., not 0/`False`) will be set to NaN for the convolution. If `None`, no masking will be performed unless ``array`` is a masked array. If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is masked if it is masked in either ``mask`` *or* ``array.mask``. normalization_zero_tol : float, optional The absolute tolerance on whether the kernel is different than zero. If the kernel sums to zero to within this precision, it cannot be normalized. Default is "1e-8". Returns ------- result : `numpy.ndarray` An array with the same dimensions as the input array, convolved with kernel. The data type depends on the input array type. If array is a floating point type, then the return array keeps the same data type, otherwise the type is ``numpy.float``. Notes ----- For masked arrays, masked values are treated as NaNs. The convolution is always done at ``numpy.float`` precision. """ if boundary not in BOUNDARY_OPTIONS: raise ValueError(f"Invalid boundary option: must be one of {BOUNDARY_OPTIONS}") if nan_treatment not in ("interpolate", "fill"): raise ValueError("nan_treatment must be one of 'interpolate','fill'") # OpenMP support is disabled at the C src code level, changing this will have # no effect. n_threads = 1 # Keep refs to originals passed_kernel = kernel passed_array = array # The C routines all need float type inputs (so, a particular # bit size, endianness, etc.). So we have to convert, which also # has the effect of making copies so we don't modify the inputs. # After this, the variables we work with will be array_internal, and # kernel_internal. However -- we do want to keep track of what type # the input array was so we can cast the result to that at the end # if it's a floating point type. Don't bother with this for lists -- # just always push those as float. # It is always necessary to make a copy of kernel (since it is modified), # but, if we just so happen to be lucky enough to have the input array # have exactly the desired type, we just alias to array_internal # Convert kernel to ndarray if not already # Copy or alias array to array_internal array_internal = _copy_input_if_needed( passed_array, dtype=float, order="C", nan_treatment=nan_treatment, mask=mask, fill_value=np.nan, ) array_dtype = getattr(passed_array, "dtype", array_internal.dtype) # Copy or alias kernel to kernel_internal kernel_internal = _copy_input_if_needed( passed_kernel, dtype=float, order="C", nan_treatment=None, mask=None, fill_value=fill_value, ) # Make sure kernel has all odd axes if has_even_axis(kernel_internal): raise_even_kernel_exception() # If both image array and kernel are Kernel instances # constrain convolution method # This must occur before the main alias/copy of ``passed_kernel`` to # ``kernel_internal`` as it is used for filling masked kernels. if isinstance(passed_array, Kernel) and isinstance(passed_kernel, Kernel): warnings.warn( "Both array and kernel are Kernel instances, hardwiring " "the following parameters: boundary='fill', fill_value=0," " normalize_kernel=True, nan_treatment='interpolate'", AstropyUserWarning, ) boundary = "fill" fill_value = 0 normalize_kernel = True nan_treatment = "interpolate" # ----------------------------------------------------------------------- # From this point onwards refer only to ``array_internal`` and # ``kernel_internal``. # Assume both are base np.ndarrays and NOT subclasses e.g. NOT # ``Kernel`` nor ``np.ma.maskedarray`` classes. 
# ----------------------------------------------------------------------- # Check dimensionality if array_internal.ndim == 0: raise Exception("cannot convolve 0-dimensional arrays") elif array_internal.ndim > 3: raise NotImplementedError( "convolve only supports 1, 2, and 3-dimensional arrays at this time" ) elif array_internal.ndim != kernel_internal.ndim: raise Exception("array and kernel have differing number of dimensions.") array_shape = np.array(array_internal.shape) kernel_shape = np.array(kernel_internal.shape) pad_width = kernel_shape // 2 # For boundary=None only the center space is convolved. All array indices within a # distance kernel.shape//2 from the edge are completely ignored (zeroed). # E.g. (1D list) only the indices len(kernel)//2 : len(array)-len(kernel)//2 # are convolved. It is therefore not possible to use this method to convolve an # array by a kernel that is larger (see note below) than the array - as ALL pixels # would be ignored leaving an array of only zeros. # Note: For even kernels the correctness condition is array_shape > kernel_shape. # For odd kernels it is: # array_shape >= kernel_shape OR # array_shape > kernel_shape-1 OR # array_shape > 2*(kernel_shape//2). # Since the latter is equal to the former two for even lengths, the latter condition is # complete. if boundary is None and not np.all(array_shape > 2 * pad_width): raise KernelSizeError( "for boundary=None all kernel axes must be smaller than array's - " "use boundary in ['fill', 'extend', 'wrap'] instead." ) # NaN interpolation significantly slows down the C convolution # computation. Since nan_treatment = 'interpolate', is the default # check whether it is even needed, if not, don't interpolate. # NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any() nan_interpolate = (nan_treatment == "interpolate") and np.isnan( array_internal.sum() ) # Check if kernel is normalizable if normalize_kernel or nan_interpolate: kernel_sum = kernel_internal.sum() kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol) if kernel_sum < 1.0 / MAX_NORMALIZATION or kernel_sums_to_zero: if nan_interpolate: raise ValueError( "Setting nan_treatment='interpolate' " "requires the kernel to be normalized, " "but the input kernel has a sum close " "to zero. For a zero-sum kernel and " "data with NaNs, set nan_treatment='fill'." ) else: raise ValueError( "The kernel can't be normalized, because " "its sum is close to zero. The sum of the " f"given kernel is < {1.0 / MAX_NORMALIZATION}" ) # Mark the NaN values so we can replace them later if interpolate_nan is # not set if preserve_nan or nan_treatment == "fill": initially_nan = np.isnan(array_internal) if nan_treatment == "fill": array_internal[initially_nan] = fill_value # Avoid any memory allocation within the C code. Allocate output array # here and pass through instead. result = np.zeros(array_internal.shape, dtype=float, order="C") embed_result_within_padded_region = True array_to_convolve = array_internal if boundary in ("fill", "extend", "wrap"): embed_result_within_padded_region = False if boundary == "fill": # This method is faster than using numpy.pad(..., mode='constant') array_to_convolve = np.full( array_shape + 2 * pad_width, fill_value=fill_value, dtype=float, order="C", ) # Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of # [pad_width[0]:-pad_width[0]] # to account for when the kernel has size of 1 making pad_width = 0. 
if array_internal.ndim == 1: array_to_convolve[ pad_width[0] : array_shape[0] + pad_width[0] ] = array_internal elif array_internal.ndim == 2: array_to_convolve[ pad_width[0] : array_shape[0] + pad_width[0], pad_width[1] : array_shape[1] + pad_width[1], ] = array_internal else: array_to_convolve[ pad_width[0] : array_shape[0] + pad_width[0], pad_width[1] : array_shape[1] + pad_width[1], pad_width[2] : array_shape[2] + pad_width[2], ] = array_internal else: np_pad_mode_dict = {"fill": "constant", "extend": "edge", "wrap": "wrap"} np_pad_mode = np_pad_mode_dict[boundary] pad_width = kernel_shape // 2 if array_internal.ndim == 1: np_pad_width = (pad_width[0],) elif array_internal.ndim == 2: np_pad_width = ((pad_width[0],), (pad_width[1],)) else: np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],)) array_to_convolve = np.pad( array_internal, pad_width=np_pad_width, mode=np_pad_mode ) _convolveNd_c( result, array_to_convolve, kernel_internal, nan_interpolate, embed_result_within_padded_region, n_threads, ) # So far, normalization has only occurred for nan_treatment == 'interpolate' # because this had to happen within the C extension so as to ignore # any NaNs if normalize_kernel: if not nan_interpolate: result /= kernel_sum elif nan_interpolate: result *= kernel_sum if nan_interpolate and not preserve_nan and np.isnan(result.sum()): warnings.warn( "nan_treatment='interpolate', however, NaN values detected " "post convolution. A contiguous region of NaN values, larger " "than the kernel size, are present in the input array. " "Increase the kernel size to avoid this.", AstropyUserWarning, ) if preserve_nan: result[initially_nan] = np.nan # Convert result to original data type array_unit = getattr(passed_array, "unit", None) if array_unit is not None: result <<= array_unit if isinstance(passed_array, Kernel): if isinstance(passed_array, Kernel1D): new_result = Kernel1D(array=result) elif isinstance(passed_array, Kernel2D): new_result = Kernel2D(array=result) else: raise TypeError("Only 1D and 2D Kernels are supported.") new_result._is_bool = False new_result._separable = passed_array._separable if isinstance(passed_kernel, Kernel): new_result._separable = new_result._separable and passed_kernel._separable return new_result elif array_dtype.kind == "f": # Try to preserve the input type if it's a floating point type # Avoid making another copy if possible try: return result.astype(array_dtype, copy=False) except TypeError: return result.astype(array_dtype) else: return result @support_nddata(data="array") def convolve_fft( array, kernel, boundary="fill", fill_value=0.0, nan_treatment="interpolate", normalize_kernel=True, normalization_zero_tol=1e-8, preserve_nan=False, mask=None, crop=True, return_fft=False, fft_pad=None, psf_pad=None, min_wt=0.0, allow_huge=False, fftn=np.fft.fftn, ifftn=np.fft.ifftn, complex_dtype=complex, dealias=False, ): """ Convolve an ndarray with an nd-kernel. Returns a convolved image with ``shape = array.shape``. Assumes kernel is centered. `convolve_fft` is very similar to `convolve` in that it replaces ``NaN`` values in the original image with interpolated values using the kernel as an interpolation function. However, it also includes many additional options specific to the implementation. `convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways: * It can treat ``NaN`` values as zeros or interpolate over them. * ``inf`` values are treated as ``NaN`` * It optionally pads to the nearest faster sizes to improve FFT speed. 
These sizes are optimized for the numpy and scipy implementations, and ``fftconvolve`` uses them by default as well; when using other external functions (see below), results may vary. * Its only valid ``mode`` is 'same' (i.e., the same shape array is returned) * It lets you use your own fft, e.g., `pyFFTW <https://pypi.org/project/pyFFTW/>`_ or `pyFFTW3 <https://pypi.org/project/PyFFTW3/0.2.1/>`_ , which can lead to performance improvements, depending on your system configuration. pyFFTW3 is threaded, and therefore may yield significant performance benefits on multi-core machines at the cost of greater memory requirements. Specify the ``fftn`` and ``ifftn`` keywords to override the default, which is `numpy.fft.fftn` and `numpy.fft.ifftn`. The `scipy.fft` functions also offer somewhat better performance and a multi-threaded option. Parameters ---------- array : `numpy.ndarray` Array to be convolved with ``kernel``. It can be of any dimensionality, though only 1, 2, and 3d arrays have been tested. kernel : `numpy.ndarray` or `astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array. The dimensions *do not* have to be odd in all directions, unlike in the non-fft `convolve` function. The kernel will be normalized if ``normalize_kernel`` is set. It is assumed to be centered (i.e., shifts may result if your kernel is asymmetric) boundary : {'fill', 'wrap'}, optional A flag indicating how to handle boundaries: * 'fill': set values outside the array boundary to fill_value (default) * 'wrap': periodic boundary The `None` and 'extend' parameters are not supported for FFT-based convolution. fill_value : float, optional The value to use outside the array when using boundary='fill'. nan_treatment : {'interpolate', 'fill'}, optional The method used to handle NaNs in the input ``array``: * ``'interpolate'``: ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Note that if the kernel has a sum equal to zero, NaN interpolation is not possible and will raise an exception. * ``'fill'``: ``NaN`` values are replaced by ``fill_value`` prior to convolution. normalize_kernel : callable or boolean, optional If specified, this is the function to divide kernel by to normalize it. e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be: ``kernel = kernel / np.sum(kernel)``. If True, defaults to ``normalize_kernel = np.sum``. normalization_zero_tol : float, optional The absolute tolerance on whether the kernel is different than zero. If the kernel sums to zero to within this precision, it cannot be normalized. Default is "1e-8". preserve_nan : bool, optional After performing convolution, should pixels that were originally NaN again become NaN? mask : None or ndarray, optional A "mask" array. Shape must match ``array``, and anything that is masked (i.e., not 0/`False`) will be set to NaN for the convolution. If `None`, no masking will be performed unless ``array`` is a masked array. If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is masked if it is masked in either ``mask`` *or* ``array.mask``. crop : bool, optional Default on. Return an image of the size of the larger of the input image and the kernel. If the image and kernel are asymmetric in opposite directions, will return the largest image in both directions. For example, if an input image has shape [100,3] but a kernel with shape [6,6] is used, the output will be [100,6]. 
return_fft : bool, optional Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is ``ifft(fft(image)*fft(kernel))``). Useful for making PSDs. fft_pad : bool, optional Default on. Zero-pad image to the nearest size supporting more efficient execution of the FFT, generally values factorizable into the first 3-5 prime numbers. With ``boundary='wrap'``, this will be disabled. psf_pad : bool, optional Zero-pad image to be at least the sum of the image sizes to avoid edge-wrapping when smoothing. This is enabled by default with ``boundary='fill'``, but it can be overridden with a boolean option. ``boundary='wrap'`` and ``psf_pad=True`` are not compatible. min_wt : float, optional If ignoring ``NaN`` / zeros, force all grid points with a weight less than this value to ``NaN`` (the weight of a grid point with *no* ignored neighbors is 1.0). If ``min_wt`` is zero, then all zero-weight points will be set to zero instead of ``NaN`` (which they would be otherwise, because 1/0 = nan). See the examples below. allow_huge : bool, optional Allow huge arrays in the FFT? If False, will raise an exception if the array or kernel size is >1 GB. fftn : callable, optional The fft function. Can be overridden to use your own ffts, e.g. an fftw3 wrapper or scipy's fftn, ``fft=scipy.fftpack.fftn``. ifftn : callable, optional The inverse fft function. Can be overridden in the same way as ``fftn``. complex_dtype : complex type, optional Which complex dtype to use. `numpy` has a range of options, from 64 to 256. dealias : bool, optional Default off. Zero-pad image to enable explicit dealiasing of convolution. With ``boundary='wrap'``, this will be disabled. Note that for an input of nd dimensions this will increase the size of the temporary arrays by at least ``1.5**nd``. This may result in significantly more memory usage. Returns ------- default : ndarray ``array`` convolved with ``kernel``. If ``return_fft`` is set, returns ``fft(array) * fft(kernel)``. If crop is not set, returns the image, but with the fft-padded size instead of the input size. Raises ------ `ValueError` If the array is bigger than 1 GB after padding, will raise this exception unless ``allow_huge`` is True. See Also -------- convolve: Convolve is a non-fft version of this code. It is more memory efficient and for small kernels can be faster. Notes ----- With ``psf_pad=True`` and a large PSF, the resulting data can become large and consume a lot of memory. See Issue https://github.com/astropy/astropy/pull/4366 and the update in https://github.com/astropy/astropy/pull/11533 for further details. Dealiasing of pseudospectral convolutions is necessary for numerical stability of the underlying algorithms. A common method for handling this is to zero pad the image by at least 1/2 to eliminate the wavenumbers which have been aliased by convolution. This is so that the aliased 1/3 of the results of the convolution computation can be thrown out. See https://doi.org/10.1175/1520-0469(1971)028%3C1074:OTEOAI%3E2.0.CO;2 https://iopscience.iop.org/article/10.1088/1742-6596/318/7/072037 Note that if dealiasing is necessary for your application, but your process is memory constrained, you may want to consider using FFTW++: https://github.com/dealias/fftwpp. It includes python wrappers for a pseudospectral convolution which will implicitly dealias your convolution without the need for additional padding. Note that one cannot use FFTW++'s convolution directly in this method, as it handles the entire convolution process internally. 
Additionally, FFTW++ includes other useful pseudospectral methods to consider. Examples -------- >>> convolve_fft([1, 0, 3], [1, 1, 1]) array([0.33333333, 1.33333333, 1. ]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1]) array([0.5, 2. , 1.5]) >>> convolve_fft([1, 0, 3], [0, 1, 0]) # doctest: +FLOAT_CMP array([ 1.00000000e+00, -3.70074342e-17, 3.00000000e+00]) >>> convolve_fft([1, 2, 3], [1]) array([1., 2., 3.]) >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate') array([1., 0., 3.]) >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate', ... min_wt=1e-8) array([ 1., nan, 3.]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate') array([0.5, 2. , 1.5]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True) array([0.5, 2. , 1.5]) >>> import scipy.fft # optional - requires scipy >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True, ... fftn=scipy.fft.fftn, ifftn=scipy.fft.ifftn) array([0.5, 2. , 1.5]) >>> fft_mp = lambda a: scipy.fft.fftn(a, workers=-1) # use all available cores >>> ifft_mp = lambda a: scipy.fft.ifftn(a, workers=-1) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True, fftn=fft_mp, ifftn=ifft_mp) array([0.5, 2. , 1.5]) """ # Checking copied from convolve.py - however, since FFTs have real & # complex components, we change the types. Only the real part will be # returned! Note that this always makes a copy. # Check kernel is kernel instance if isinstance(kernel, Kernel): kernel = kernel.array if isinstance(array, Kernel): raise TypeError( "Can't convolve two kernels with convolve_fft. Use convolve instead." ) if nan_treatment not in ("interpolate", "fill"): raise ValueError("nan_treatment must be one of 'interpolate','fill'") # Get array quantity if it exists array_unit = getattr(array, "unit", None) # Convert array dtype to complex # and ensure that list inputs become arrays array = _copy_input_if_needed( array, dtype=complex, order="C", nan_treatment=nan_treatment, mask=mask, fill_value=np.nan, ) kernel = _copy_input_if_needed( kernel, dtype=complex, order="C", nan_treatment=None, mask=None, fill_value=0 ) # Check that the number of dimensions is compatible if array.ndim != kernel.ndim: raise ValueError("Image and kernel must have same number of dimensions") arrayshape = array.shape kernshape = kernel.shape array_size_B = ( np.prod(arrayshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize ) * u.byte if array_size_B > 1 * u.GB and not allow_huge: raise ValueError( f"Size Error: Arrays will be {human_file_size(array_size_B)}. " "Use allow_huge=True to override this exception." ) # NaN and inf catching nanmaskarray = np.isnan(array) | np.isinf(array) if nan_treatment == "fill": array[nanmaskarray] = fill_value else: array[nanmaskarray] = 0 nanmaskkernel = np.isnan(kernel) | np.isinf(kernel) kernel[nanmaskkernel] = 0 if normalize_kernel is True: if kernel.sum() < 1.0 / MAX_NORMALIZATION: raise Exception( "The kernel can't be normalized, because its sum is close to zero. The" f" sum of the given kernel is < {1.0 / MAX_NORMALIZATION}" ) kernel_scale = kernel.sum() normalized_kernel = kernel / kernel_scale kernel_scale = 1 # if we want to normalize it, leave it normed! elif normalize_kernel: # try this. If a function is not passed, the code will just crash... I # think type checking would be better but PEPs say otherwise... 
kernel_scale = normalize_kernel(kernel) normalized_kernel = kernel / kernel_scale else: kernel_scale = kernel.sum() if np.abs(kernel_scale) < normalization_zero_tol: if nan_treatment == "interpolate": raise ValueError( "Cannot interpolate NaNs with an unnormalizable kernel" ) else: # the kernel's sum is near-zero, so it can't be scaled kernel_scale = 1 normalized_kernel = kernel else: # the kernel is normalizable; we'll temporarily normalize it # now and undo the normalization later. normalized_kernel = kernel / kernel_scale if boundary is None: warnings.warn( "The convolve_fft version of boundary=None is " "equivalent to the convolve boundary='fill'. There is " "no FFT equivalent to convolve's " "zero-if-kernel-leaves-boundary", AstropyUserWarning, ) if psf_pad is None: psf_pad = True if fft_pad is None: fft_pad = True elif boundary == "fill": # create a boundary region at least as large as the kernel if psf_pad is False: warnings.warn( f"psf_pad was set to {psf_pad}, which overrides the " "boundary='fill' setting.", AstropyUserWarning, ) else: psf_pad = True if fft_pad is None: # default is 'True' according to the docstring fft_pad = True elif boundary == "wrap": if psf_pad: raise ValueError("With boundary='wrap', psf_pad cannot be enabled.") psf_pad = False if fft_pad: raise ValueError("With boundary='wrap', fft_pad cannot be enabled.") fft_pad = False if dealias: raise ValueError("With boundary='wrap', dealias cannot be enabled.") fill_value = 0 # force zero; it should not be used elif boundary == "extend": raise NotImplementedError( "The 'extend' option is not implemented for fft-based convolution" ) # Add shapes elementwise for psf_pad. if psf_pad: # default=False # add the sizes along each dimension (bigger) newshape = np.array(arrayshape) + np.array(kernshape) else: # take the larger shape in each dimension (smaller) newshape = np.maximum(arrayshape, kernshape) if dealias: # Extend shape by 1/2 for dealiasing newshape += np.ceil(newshape / 2).astype(int) # Find ideal size for fft (was power of 2, now any powers of prime factors 2, 3, 5). if fft_pad: # default=True # Get optimized sizes from scipy. newshape = _next_fast_lengths(newshape) # perform a second check after padding array_size_C = ( np.prod(newshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize ) * u.byte if array_size_C > 1 * u.GB and not allow_huge: raise ValueError( f"Size Error: Arrays will be {human_file_size(array_size_C)}. " "Use allow_huge=True to override this exception." ) # For future reference, this can be used to predict "almost exactly" # how much *additional* memory will be used. # size * (array + kernel + kernelfft + arrayfft + # (kernel*array)fft + # optional(weight image + weight_fft + weight_ifft) + # optional(returned_fft)) # total_memory_used_GB = (np.prod(newshape)*np.dtype(complex_dtype).itemsize # * (5 + 3*((interpolate_nan or ) and kernel_is_normalized)) # + (1 + (not return_fft)) * # np.prod(arrayshape)*np.dtype(complex_dtype).itemsize # + np.prod(arrayshape)*np.dtype(bool).itemsize # + np.prod(kernshape)*np.dtype(bool).itemsize) # ) / 1024.**3 # separate each dimension by the padding size... 
this is to determine the # appropriate slice size to get back to the input dimensions arrayslices = [] kernslices = [] for newdimsize, arraydimsize, kerndimsize in zip(newshape, arrayshape, kernshape): center = newdimsize - (newdimsize + 1) // 2 arrayslices += [ slice(center - arraydimsize // 2, center + (arraydimsize + 1) // 2) ] kernslices += [ slice(center - kerndimsize // 2, center + (kerndimsize + 1) // 2) ] arrayslices = tuple(arrayslices) kernslices = tuple(kernslices) if not np.all(newshape == arrayshape): if np.isfinite(fill_value): bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value else: bigarray = np.zeros(newshape, dtype=complex_dtype) bigarray[arrayslices] = array else: bigarray = array if not np.all(newshape == kernshape): bigkernel = np.zeros(newshape, dtype=complex_dtype) bigkernel[kernslices] = normalized_kernel else: bigkernel = normalized_kernel arrayfft = fftn(bigarray) # need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity kernfft = fftn(np.fft.ifftshift(bigkernel)) fftmult = arrayfft * kernfft interpolate_nan = nan_treatment == "interpolate" if interpolate_nan: if not np.isfinite(fill_value): bigimwt = np.zeros(newshape, dtype=complex_dtype) else: bigimwt = np.ones(newshape, dtype=complex_dtype) bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan wtfft = fftn(bigimwt) # You can only get to this point if kernel_is_normalized wtfftmult = wtfft * kernfft wtsm = ifftn(wtfftmult) # need to re-zero weights outside of the image (if it is padded, we # still don't weight those regions) bigimwt[arrayslices] = wtsm.real[arrayslices] else: bigimwt = 1 if np.isnan(fftmult).any(): # this check should be unnecessary; call it an insanity check raise ValueError("Encountered NaNs in convolve. This is disallowed.") fftmult *= kernel_scale if array_unit is not None: fftmult <<= array_unit if return_fft: return fftmult if interpolate_nan: with np.errstate(divide="ignore", invalid="ignore"): # divide by zeros are expected here; if the weight is zero, we want # the output to be nan or inf rifft = (ifftn(fftmult)) / bigimwt if not np.isscalar(bigimwt): if min_wt > 0.0: rifft[bigimwt < min_wt] = np.nan else: # Set anything with no weight to zero (taking into account # slight offsets due to floating-point errors). rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0 else: rifft = ifftn(fftmult) if preserve_nan: rifft[arrayslices][nanmaskarray] = np.nan if crop: result = rifft[arrayslices].real return result else: return rifft.real def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs): """ Given a data set containing NaNs, replace the NaNs by interpolating from neighboring data points with a given kernel. Parameters ---------- array : `numpy.ndarray` Array to be convolved with ``kernel``. It can be of any dimensionality, though only 1, 2, and 3d arrays have been tested. kernel : `numpy.ndarray` or `astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array. The dimensions *do not* have to be odd in all directions, unlike in the non-fft `convolve` function. The kernel will be normalized if ``normalize_kernel`` is set. It is assumed to be centered (i.e., shifts may result if your kernel is asymmetric). The kernel *must be normalizable* (i.e., its sum cannot be zero). convolve : `convolve` or `convolve_fft` One of the two convolution functions defined in this package. 
Returns ------- newarray : `numpy.ndarray` A copy of the original array with NaN pixels replaced with their interpolated counterparts """ if not np.any(np.isnan(array)): return array.copy() newarray = array.copy() convolved = convolve( array, kernel, nan_treatment="interpolate", normalize_kernel=True, preserve_nan=False, **kwargs, ) isnan = np.isnan(array) newarray[isnan] = convolved[isnan] return newarray def convolve_models(model, kernel, mode="convolve_fft", **kwargs): """ Convolve two models using `~astropy.convolution.convolve_fft`. Parameters ---------- model : `~astropy.modeling.core.Model` Functional model kernel : `~astropy.modeling.core.Model` Convolution kernel mode : str Keyword representing which function to use for convolution. * 'convolve_fft' : use `~astropy.convolution.convolve_fft` function. * 'convolve' : use `~astropy.convolution.convolve`. **kwargs : dict Keyword arguments to me passed either to `~astropy.convolution.convolve` or `~astropy.convolution.convolve_fft` depending on ``mode``. Returns ------- default : `~astropy.modeling.core.CompoundModel` Convolved model """ if mode == "convolve_fft": operator = SPECIAL_OPERATORS.add( "convolve_fft", partial(convolve_fft, **kwargs) ) elif mode == "convolve": operator = SPECIAL_OPERATORS.add("convolve", partial(convolve, **kwargs)) else: raise ValueError(f"Mode {mode} is not supported.") return CompoundModel(operator, model, kernel) def convolve_models_fft(model, kernel, bounding_box, resolution, cache=True, **kwargs): """ Convolve two models using `~astropy.convolution.convolve_fft`. Parameters ---------- model : `~astropy.modeling.core.Model` Functional model kernel : `~astropy.modeling.core.Model` Convolution kernel bounding_box : tuple The bounding box which encompasses enough of the support of both the ``model`` and ``kernel`` so that an accurate convolution can be computed. resolution : float The resolution that one wishes to approximate the convolution integral at. cache : optional, bool Default value True. Allow for the storage of the convolution computation for later reuse. **kwargs : dict Keyword arguments to be passed either to `~astropy.convolution.convolve` or `~astropy.convolution.convolve_fft` depending on ``mode``. Returns ------- default : `~astropy.modeling.core.CompoundModel` Convolved model """ operator = SPECIAL_OPERATORS.add("convolve_fft", partial(convolve_fft, **kwargs)) return Convolution(operator, model, kernel, bounding_box, resolution, cache)
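# ---------------------------------------------------------------------------
# Usage sketch for the convolution entry points above. The image size, kernel
# width, and NaN positions are illustrative assumptions, not values taken from
# this module.
import numpy as np

from astropy.convolution import (
    Gaussian2DKernel,
    convolve_fft,
    interpolate_replace_nans,
)

rng = np.random.default_rng(42)
image = rng.normal(size=(64, 64))
image[10, 10] = np.nan          # a few "bad" pixels
image[30:32, 40] = np.nan

kernel = Gaussian2DKernel(x_stddev=1.5)

# FFT-based smoothing; NaNs are interpolated over by renormalizing the kernel
# weights (nan_treatment="interpolate"), and boundary="fill" pads the array
# with fill_value (zero by default).
smoothed = convolve_fft(image, kernel, boundary="fill", nan_treatment="interpolate")

# Replace only the NaN pixels, leaving finite pixels untouched; the FFT-based
# convolver is passed in via the ``convolve`` keyword.
fixed = interpolate_replace_nans(image, kernel, convolve=convolve_fft)
assert not np.isnan(fixed).any()

# convolve_models / convolve_models_fft (defined above) wrap these routines
# into compound astropy.modeling models via SPECIAL_OPERATORS.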
c06908ce7c48dd3ea235bce8780057bbe7d909c0db4e30397fe0b971252ecc9e
# Licensed under a 3-clause BSD style license - see LICENSE.rst """This module contains functions to determine where configuration and data/cache files used by Astropy should be placed. """ import os import shutil import sys from functools import wraps __all__ = ["get_config_dir", "get_cache_dir", "set_temp_config", "set_temp_cache"] def _find_home(): """Locates and return the home directory (or best approximation) on this system. Raises ------ OSError If the home directory cannot be located - usually means you are running Astropy on some obscure platform that doesn't have standard home directories. """ try: homedir = os.path.expanduser("~") except Exception: # Linux, Unix, AIX, OS X if os.name == "posix": if "HOME" in os.environ: homedir = os.environ["HOME"] else: raise OSError( "Could not find unix home directory to search for " "astropy config dir" ) elif os.name == "nt": # This is for all modern Windows (NT or after) if "MSYSTEM" in os.environ and os.environ.get("HOME"): # Likely using an msys shell; use whatever it is using for its # $HOME directory homedir = os.environ["HOME"] # See if there's a local home elif "HOMEDRIVE" in os.environ and "HOMEPATH" in os.environ: homedir = os.path.join(os.environ["HOMEDRIVE"], os.environ["HOMEPATH"]) # Maybe a user profile? elif "USERPROFILE" in os.environ: homedir = os.path.join(os.environ["USERPROFILE"]) else: try: import winreg as wreg shell_folders = r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders) homedir = wreg.QueryValueEx(key, "Personal")[0] key.Close() except Exception: # As a final possible resort, see if HOME is present if "HOME" in os.environ: homedir = os.environ["HOME"] else: raise OSError( "Could not find windows home directory to " "search for astropy config dir" ) else: # for other platforms, try HOME, although it probably isn't there if "HOME" in os.environ: homedir = os.environ["HOME"] else: raise OSError( "Could not find a home directory to search for " "astropy config dir - are you on an unsupported " "platform?" ) return homedir def get_config_dir(rootname="astropy"): """ Determines the package configuration directory name and creates the directory if it doesn't exist. This directory is typically ``$HOME/.astropy/config``, but if the XDG_CONFIG_HOME environment variable is set and the ``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory. If neither exists, the former will be created and symlinked to the latter. Parameters ---------- rootname : str Name of the root configuration directory. For example, if ``rootname = 'pkgname'``, the configuration directory would be ``<home>/.pkgname/`` rather than ``<home>/.astropy`` (depending on platform). Returns ------- configdir : str The absolute path to the configuration directory. 
""" # symlink will be set to this if the directory is created linkto = None # If using set_temp_config, that overrides all if set_temp_config._temp_path is not None: xch = set_temp_config._temp_path config_path = os.path.join(xch, rootname) if not os.path.exists(config_path): os.mkdir(config_path) return os.path.abspath(config_path) # first look for XDG_CONFIG_HOME xch = os.environ.get("XDG_CONFIG_HOME") if xch is not None and os.path.exists(xch): xchpth = os.path.join(xch, rootname) if not os.path.islink(xchpth): if os.path.exists(xchpth): return os.path.abspath(xchpth) else: linkto = xchpth return os.path.abspath(_find_or_create_root_dir("config", linkto, rootname)) def get_cache_dir(rootname="astropy"): """ Determines the Astropy cache directory name and creates the directory if it doesn't exist. This directory is typically ``$HOME/.astropy/cache``, but if the XDG_CACHE_HOME environment variable is set and the ``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory. If neither exists, the former will be created and symlinked to the latter. Parameters ---------- rootname : str Name of the root cache directory. For example, if ``rootname = 'pkgname'``, the cache directory will be ``<cache>/.pkgname/``. Returns ------- cachedir : str The absolute path to the cache directory. """ # symlink will be set to this if the directory is created linkto = None # If using set_temp_cache, that overrides all if set_temp_cache._temp_path is not None: xch = set_temp_cache._temp_path cache_path = os.path.join(xch, rootname) if not os.path.exists(cache_path): os.mkdir(cache_path) return os.path.abspath(cache_path) # first look for XDG_CACHE_HOME xch = os.environ.get("XDG_CACHE_HOME") if xch is not None and os.path.exists(xch): xchpth = os.path.join(xch, rootname) if not os.path.islink(xchpth): if os.path.exists(xchpth): return os.path.abspath(xchpth) else: linkto = xchpth return os.path.abspath(_find_or_create_root_dir("cache", linkto, rootname)) class _SetTempPath: _temp_path = None _default_path_getter = None def __init__(self, path=None, delete=False): if path is not None: path = os.path.abspath(path) self._path = path self._delete = delete self._prev_path = self.__class__._temp_path def __enter__(self): self.__class__._temp_path = self._path try: return self._default_path_getter("astropy") except Exception: self.__class__._temp_path = self._prev_path raise def __exit__(self, *args): self.__class__._temp_path = self._prev_path if self._delete and self._path is not None: shutil.rmtree(self._path) def __call__(self, func): """Implements use as a decorator.""" @wraps(func) def wrapper(*args, **kwargs): with self: func(*args, **kwargs) return wrapper class set_temp_config(_SetTempPath): """ Context manager to set a temporary path for the Astropy config, primarily for use with testing. If the path set by this context manager does not already exist it will be created, if possible. This may also be used as a decorator on a function to set the config path just within that function. Parameters ---------- path : str, optional The directory (which must exist) in which to find the Astropy config files, or create them if they do not already exist. If None, this restores the config path to the user's default config path as returned by `get_config_dir` as though this context manager were not in effect (this is useful for testing). In this case the ``delete`` argument is always ignored. delete : bool, optional If True, cleans up the temporary directory after exiting the temp context (default: False). 
""" _default_path_getter = staticmethod(get_config_dir) def __enter__(self): # Special case for the config case, where we need to reset all the # cached config objects. We do keep the cache, since some of it # may have been set programmatically rather than be stored in the # config file (e.g., iers.conf.auto_download=False for our tests). from .configuration import _cfgobjs self._cfgobjs_copy = _cfgobjs.copy() _cfgobjs.clear() return super().__enter__() def __exit__(self, *args): from .configuration import _cfgobjs _cfgobjs.clear() _cfgobjs.update(self._cfgobjs_copy) del self._cfgobjs_copy super().__exit__(*args) class set_temp_cache(_SetTempPath): """ Context manager to set a temporary path for the Astropy download cache, primarily for use with testing (though there may be other applications for setting a different cache directory, for example to switch to a cache dedicated to large files). If the path set by this context manager does not already exist it will be created, if possible. This may also be used as a decorator on a function to set the cache path just within that function. Parameters ---------- path : str The directory (which must exist) in which to find the Astropy cache files, or create them if they do not already exist. If None, this restores the cache path to the user's default cache path as returned by `get_cache_dir` as though this context manager were not in effect (this is useful for testing). In this case the ``delete`` argument is always ignored. delete : bool, optional If True, cleans up the temporary directory after exiting the temp context (default: False). """ _default_path_getter = staticmethod(get_cache_dir) def _find_or_create_root_dir(dirnm, linkto, pkgname="astropy"): innerdir = os.path.join(_find_home(), f".{pkgname}") maindir = os.path.join(_find_home(), f".{pkgname}", dirnm) if not os.path.exists(maindir): # first create .astropy dir if needed if not os.path.exists(innerdir): try: os.mkdir(innerdir) except OSError: if not os.path.isdir(innerdir): raise elif not os.path.isdir(innerdir): raise OSError( f"Intended {pkgname} {dirnm} directory {maindir} is actually a file." ) try: os.mkdir(maindir) except OSError: if not os.path.isdir(maindir): raise if ( not sys.platform.startswith("win") and linkto is not None and not os.path.exists(linkto) ): os.symlink(maindir, linkto) elif not os.path.isdir(maindir): raise OSError( f"Intended {pkgname} {dirnm} directory {maindir} is actually a file." ) return os.path.abspath(maindir)
e65a4afa8e6c943858fc374b7bccf58e7d7d512bc38fb56f658e23abc65a06a8
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Distribution class and associated machinery. """ import builtins import numpy as np from astropy import stats from astropy import units as u __all__ = ["Distribution"] # we set this by hand because the symbolic expression (below) requires scipy # SMAD_SCALE_FACTOR = 1 / scipy.stats.norm.ppf(0.75) SMAD_SCALE_FACTOR = 1.48260221850560203193936104071326553821563720703125 class Distribution: """ A scalar value or array values with associated uncertainty distribution. This object will take its exact type from whatever the ``samples`` argument is. In general this is expected to be an `~astropy.units.Quantity` or `numpy.ndarray`, although anything compatible with `numpy.asanyarray` is possible. See also: https://docs.astropy.org/en/stable/uncertainty/ Parameters ---------- samples : array-like The distribution, with sampling along the *leading* axis. If 1D, the sole dimension is used as the sampling axis (i.e., it is a scalar distribution). """ _generated_subclasses = {} def __new__(cls, samples): if isinstance(samples, Distribution): samples = samples.distribution else: samples = np.asanyarray(samples, order="C") if samples.shape == (): raise TypeError("Attempted to initialize a Distribution with a scalar") new_dtype = np.dtype( {"names": ["samples"], "formats": [(samples.dtype, (samples.shape[-1],))]} ) samples_cls = type(samples) new_cls = cls._generated_subclasses.get(samples_cls) if new_cls is None: # Make a new class with the combined name, inserting Distribution # itself below the samples class since that way Quantity methods # like ".to" just work (as .view() gets intercepted). However, # repr and str are problems, so we put those on top. # TODO: try to deal with this at the lower level. The problem is # that array2string does not allow one to override how structured # arrays are typeset, leading to all samples to be shown. It may # be possible to hack oneself out by temporarily becoming a void. new_name = samples_cls.__name__ + cls.__name__ new_cls = type( new_name, (_DistributionRepr, samples_cls, ArrayDistribution), {"_samples_cls": samples_cls}, ) cls._generated_subclasses[samples_cls] = new_cls self = samples.view(dtype=new_dtype, type=new_cls) # Get rid of trailing dimension of 1. self.shape = samples.shape[:-1] return self @property def distribution(self): return self["samples"] def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): converted = [] outputs = kwargs.pop("out", None) if outputs: kwargs["out"] = tuple( (output.distribution if isinstance(output, Distribution) else output) for output in outputs ) if method in {"reduce", "accumulate", "reduceat"}: axis = kwargs.get("axis", None) if axis is None: assert isinstance(inputs[0], Distribution) kwargs["axis"] = tuple(range(inputs[0].ndim)) for input_ in inputs: if isinstance(input_, Distribution): converted.append(input_.distribution) else: shape = getattr(input_, "shape", ()) if shape: converted.append(input_[..., np.newaxis]) else: converted.append(input_) results = getattr(ufunc, method)(*converted, **kwargs) if not isinstance(results, tuple): results = (results,) if outputs is None: outputs = (None,) * len(results) finals = [] for result, output in zip(results, outputs): if output is not None: finals.append(output) else: if getattr(result, "shape", False): finals.append(Distribution(result)) else: finals.append(result) return finals if len(finals) > 1 else finals[0] @property def n_samples(self): """ The number of samples of this distribution. 
A single `int`. """ return self.dtype["samples"].shape[0] def pdf_mean(self, dtype=None, out=None): """ The mean of this distribution. Arguments are as for `numpy.mean`. """ return self.distribution.mean(axis=-1, dtype=dtype, out=out) def pdf_std(self, dtype=None, out=None, ddof=0): """ The standard deviation of this distribution. Arguments are as for `numpy.std`. """ return self.distribution.std(axis=-1, dtype=dtype, out=out, ddof=ddof) def pdf_var(self, dtype=None, out=None, ddof=0): """ The variance of this distribution. Arguments are as for `numpy.var`. """ return self.distribution.var(axis=-1, dtype=dtype, out=out, ddof=ddof) def pdf_median(self, out=None): """ The median of this distribution. Parameters ---------- out : array, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. """ return np.median(self.distribution, axis=-1, out=out) def pdf_mad(self, out=None): """ The median absolute deviation of this distribution. Parameters ---------- out : array, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. """ median = self.pdf_median(out=out) absdiff = np.abs(self - median) return np.median( absdiff.distribution, axis=-1, out=median, overwrite_input=True ) def pdf_smad(self, out=None): """ The median absolute deviation of this distribution rescaled to match the standard deviation for a normal distribution. Parameters ---------- out : array, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. """ result = self.pdf_mad(out=out) result *= SMAD_SCALE_FACTOR return result def pdf_percentiles(self, percentile, **kwargs): """ Compute percentiles of this Distribution. Parameters ---------- percentile : float or array of float or `~astropy.units.Quantity` The desired percentiles of the distribution (i.e., on [0,100]). `~astropy.units.Quantity` will be converted to percent, meaning that a ``dimensionless_unscaled`` `~astropy.units.Quantity` will be interpreted as a quantile. Additional keywords are passed into `numpy.percentile`. Returns ------- percentiles : `~astropy.units.Quantity` ['dimensionless'] The ``fracs`` percentiles of this distribution. """ percentile = u.Quantity(percentile, u.percent).value percs = np.percentile(self.distribution, percentile, axis=-1, **kwargs) # numpy.percentile strips units for unclear reasons, so we have to make # a new object with units if hasattr(self.distribution, "_new_view"): return self.distribution._new_view(percs) else: return percs def pdf_histogram(self, **kwargs): """ Compute histogram over the samples in the distribution. Parameters ---------- All keyword arguments are passed into `astropy.stats.histogram`. Note That some of these options may not be valid for some multidimensional distributions. Returns ------- hist : array The values of the histogram. Trailing dimension is the histogram dimension. bin_edges : array of dtype float Return the bin edges ``(length(hist)+1)``. Trailing dimension is the bin histogram dimension. 
""" distr = self.distribution raveled_distr = distr.reshape(distr.size // distr.shape[-1], distr.shape[-1]) nhists = [] bin_edges = [] for d in raveled_distr: nhist, bin_edge = stats.histogram(d, **kwargs) nhists.append(nhist) bin_edges.append(bin_edge) nhists = np.array(nhists) nh_shape = self.shape + (nhists.size // self.size,) bin_edges = np.array(bin_edges) be_shape = self.shape + (bin_edges.size // self.size,) return nhists.reshape(nh_shape), bin_edges.reshape(be_shape) class ScalarDistribution(Distribution, np.void): """Scalar distribution. This class mostly exists to make `~numpy.array2print` possible for all subclasses. It is a scalar element, still with n_samples samples. """ pass class ArrayDistribution(Distribution, np.ndarray): # This includes the important override of view and __getitem__ # which are needed for all ndarray subclass Distributions, but not # for the scalar one. _samples_cls = np.ndarray # Override view so that we stay a Distribution version of the new type. def view(self, dtype=None, type=None): """New view of array with the same data. Like `~numpy.ndarray.view` except that the result will always be a new `~astropy.uncertainty.Distribution` instance. If the requested ``type`` is a `~astropy.uncertainty.Distribution`, then no change in ``dtype`` is allowed. """ if type is None and ( isinstance(dtype, builtins.type) and issubclass(dtype, np.ndarray) ): type = dtype dtype = None view_args = [item for item in (dtype, type) if item is not None] if type is None or ( isinstance(type, builtins.type) and issubclass(type, Distribution) ): if dtype is not None and dtype != self.dtype: raise ValueError( "cannot view as Distribution subclass with a new dtype." ) return super().view(*view_args) # View as the new non-Distribution class, but turn into a Distribution again. result = self.distribution.view(*view_args) return Distribution(result) # Override __getitem__ so that 'samples' is returned as the sample class. def __getitem__(self, item): if isinstance(item, Distribution): # Required for in-place operations like dist[dist < 0] += 360. return self.distribution[item.distribution] result = super().__getitem__(item) if item == "samples": # Here, we need to avoid our own redefinition of view. return super(ArrayDistribution, result).view(self._samples_cls) elif isinstance(result, np.void): return result.view((ScalarDistribution, result.dtype)) else: return result def __setitem__(self, item, value): if isinstance(item, Distribution): # Support operations like dist[dist < 0] = 0. self.distribution[item.distribution] = value else: super().__setitem__(item, value) # Override __eq__ and __ne__ to pass on directly to the ufunc since # otherwise comparisons with non-distributions do not work (but # deferring if other defines __array_ufunc__ = None -- see # numpy/core/src/common/binop_override.h for the logic; we assume we # will never deal with __array_priority__ any more). Note: there is no # problem for other comparisons, since for those, structured arrays are # not treated differently in numpy/core/src/multiarray/arrayobject.c. 
def __eq__(self, other): if getattr(other, "__array_ufunc__", False) is None: return NotImplemented return np.equal(self, other) def __ne__(self, other): if getattr(other, "__array_ufunc__", False) is None: return NotImplemented return np.not_equal(self, other) class _DistributionRepr: def __repr__(self): reprarr = repr(self.distribution) if reprarr.endswith(">"): firstspace = reprarr.find(" ") reprarr = reprarr[firstspace + 1 : -1] # :-1] removes the ending '>' return ( f"<{self.__class__.__name__} {reprarr} with n_samples={self.n_samples}>" ) else: # numpy array-like firstparen = reprarr.find("(") reprarr = reprarr[firstparen:] return f"{self.__class__.__name__}{reprarr} with n_samples={self.n_samples}" def __str__(self): distrstr = str(self.distribution) toadd = f" with n_samples={self.n_samples}" return distrstr + toadd def _repr_latex_(self): if hasattr(self.distribution, "_repr_latex_"): superlatex = self.distribution._repr_latex_() toadd = rf", \; n_{{\rm samp}}={self.n_samples}" return superlatex[:-1] + toadd + superlatex[-1] else: return None class NdarrayDistribution(_DistributionRepr, ArrayDistribution): pass # Ensure our base NdarrayDistribution is known. Distribution._generated_subclasses[np.ndarray] = NdarrayDistribution
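# ---------------------------------------------------------------------------
# Usage sketch for Distribution. The sample counts and units below are
# illustrative assumptions; samples are taken along the trailing axis of the
# input (cf. ``samples.shape[-1]`` in ``__new__`` above).
import numpy as np

from astropy import units as u
from astropy.uncertainty import Distribution, normal

rng = np.random.default_rng(0)

# Wrap an array of Monte-Carlo samples: shape (3, 1000) becomes three
# distribution elements with n_samples == 1000 each.
d = Distribution(rng.normal(10.0, 2.0, size=(3, 1000)) * u.km)

# Helper constructors build common distributions directly.
e = normal(5.0 * u.km, std=1.0 * u.km, n_samples=1000)

print(d.n_samples)                      # 1000
print(d.pdf_mean(), d.pdf_std())
print(d.pdf_percentiles([16, 50, 84]))

# Arithmetic propagates sample-by-sample via __array_ufunc__, so the result
# is again a Distribution.
total = d + e
print(total.pdf_median(), total.pdf_smad())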
ccd29fff73bdee3488394c967273ec22d06b634c9d00e7078919d107e5b078f3
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module implements classes (called Fitters) which combine optimization algorithms (typically from `scipy.optimize`) with statistic functions to perform fitting. Fitters are implemented as callable classes. In addition to the data to fit, the ``__call__`` method takes an instance of `~astropy.modeling.core.FittableModel` as input, and returns a copy of the model with its parameters determined by the optimizer. Optimization algorithms, called "optimizers" are implemented in `~astropy.modeling.optimizers` and statistic functions are in `~astropy.modeling.statistic`. The goal is to provide an easy to extend framework and allow users to easily create new fitters by combining statistics with optimizers. There are two exceptions to the above scheme. `~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq` function. `~astropy.modeling.fitting.LevMarLSQFitter` uses `~scipy.optimize.leastsq` which combines optimization and statistic in one implementation. """ # pylint: disable=invalid-name import abc import inspect import operator import warnings from functools import reduce, wraps from importlib.metadata import entry_points import numpy as np from astropy.units import Quantity from astropy.utils.decorators import deprecated from astropy.utils.exceptions import AstropyUserWarning from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex from .spline import ( SplineExactKnotsFitter, SplineInterpolateFitter, SplineSmoothingFitter, SplineSplrepFitter, ) from .statistic import leastsquare from .utils import _combine_equivalency_dict, poly_map_domain __all__ = [ "LinearLSQFitter", "LevMarLSQFitter", "TRFLSQFitter", "DogBoxLSQFitter", "LMLSQFitter", "FittingWithOutlierRemoval", "SLSQPLSQFitter", "SimplexLSQFitter", "JointFitter", "Fitter", "ModelLinearityError", "ModelsError", "SplineExactKnotsFitter", "SplineInterpolateFitter", "SplineSmoothingFitter", "SplineSplrepFitter", ] # Statistic functions implemented in `astropy.modeling.statistic.py STATISTICS = [leastsquare] # Optimizers implemented in `astropy.modeling.optimizers.py OPTIMIZERS = [Simplex, SLSQP] class NonFiniteValueError(RuntimeError): """ Error raised when attempting to a non-finite value. """ class Covariance: """Class for covariance matrix calculated by fitter.""" def __init__(self, cov_matrix, param_names): self.cov_matrix = cov_matrix self.param_names = param_names def pprint(self, max_lines, round_val): # Print and label lower triangle of covariance matrix # Print rows for params up to `max_lines`, round floats to 'round_val' longest_name = max(len(x) for x in self.param_names) ret_str = "parameter variances / covariances \n" fstring = f'{"": <{longest_name}}| {{0}}\n' for i, row in enumerate(self.cov_matrix): if i <= max_lines - 1: param = self.param_names[i] ret_str += fstring.replace(" " * len(param), param, 1).format( repr(np.round(row[: i + 1], round_val))[7:-2] ) else: ret_str += "..." 
return ret_str.rstrip() def __repr__(self): return self.pprint(max_lines=10, round_val=3) def __getitem__(self, params): # index covariance matrix by parameter names or indices if len(params) != 2: raise ValueError("Covariance must be indexed by two values.") if all(isinstance(item, str) for item in params): i1, i2 = self.param_names.index(params[0]), self.param_names.index( params[1] ) elif all(isinstance(item, int) for item in params): i1, i2 = params else: raise TypeError( "Covariance can be indexed by two parameter names or integer indices." ) return self.cov_matrix[i1][i2] class StandardDeviations: """Class for fitting uncertainties.""" def __init__(self, cov_matrix, param_names): self.param_names = param_names self.stds = self._calc_stds(cov_matrix) def _calc_stds(self, cov_matrix): # sometimes scipy lstsq returns a non-sensical negative vals in the # diagonals of the cov_x it computes. stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)] return stds def pprint(self, max_lines, round_val): longest_name = max(len(x) for x in self.param_names) ret_str = "standard deviations\n" for i, std in enumerate(self.stds): if i <= max_lines - 1: param = self.param_names[i] ret_str += ( f"{param}{' ' * (longest_name - len(param))}| " f"{np.round(std, round_val)}\n" ) else: ret_str += "..." return ret_str.rstrip() def __repr__(self): return self.pprint(max_lines=10, round_val=3) def __getitem__(self, param): if isinstance(param, str): i = self.param_names.index(param) elif isinstance(param, int): i = param else: raise TypeError( "Standard deviation can be indexed by parameter name or integer." ) return self.stds[i] class ModelsError(Exception): """Base class for model exceptions.""" class ModelLinearityError(ModelsError): """Raised when a non-linear model is passed to a linear fitter.""" class UnsupportedConstraintError(ModelsError, ValueError): """ Raised when a fitter does not support a type of constraint. """ class _FitterMeta(abc.ABCMeta): """ Currently just provides a registry for all Fitter classes. """ registry = set() def __new__(mcls, name, bases, members): cls = super().__new__(mcls, name, bases, members) if not inspect.isabstract(cls) and not name.startswith("_"): mcls.registry.add(cls) return cls def fitter_unit_support(func): """ This is a decorator that can be used to add support for dealing with quantities to any __call__ method on a fitter which may not support quantities itself. This is done by temporarily removing units from all parameters then adding them back once the fitting has completed. """ @wraps(func) def wrapper(self, model, x, y, z=None, **kwargs): equivalencies = kwargs.pop("equivalencies", None) data_has_units = ( isinstance(x, Quantity) or isinstance(y, Quantity) or isinstance(z, Quantity) ) model_has_units = model._has_units if data_has_units or model_has_units: if model._supports_unit_fitting: # We now combine any instance-level input equivalencies with user # specified ones at call-time. input_units_equivalencies = _combine_equivalency_dict( model.inputs, equivalencies, model.input_units_equivalencies ) # If input_units is defined, we transform the input data into those # expected by the model. 
We hard-code the input names 'x', and 'y' # here since FittableModel instances have input names ('x',) or # ('x', 'y') if model.input_units is not None: if isinstance(x, Quantity): x = x.to( model.input_units[model.inputs[0]], equivalencies=input_units_equivalencies[model.inputs[0]], ) if isinstance(y, Quantity) and z is not None: y = y.to( model.input_units[model.inputs[1]], equivalencies=input_units_equivalencies[model.inputs[1]], ) # Create a dictionary mapping the real model inputs and outputs # names to the data. This remapping of names must be done here, after # the input data is converted to the correct units. rename_data = {model.inputs[0]: x} if z is not None: rename_data[model.outputs[0]] = z rename_data[model.inputs[1]] = y else: rename_data[model.outputs[0]] = y rename_data["z"] = None # We now strip away the units from the parameters, taking care to # first convert any parameters to the units that correspond to the # input units (to make sure that initial guesses on the parameters) # are in the right unit system model = model.without_units_for_data(**rename_data) if isinstance(model, tuple): rename_data["_left_kwargs"] = model[1] rename_data["_right_kwargs"] = model[2] model = model[0] # We strip away the units from the input itself add_back_units = False if isinstance(x, Quantity): add_back_units = True xdata = x.value else: xdata = np.asarray(x) if isinstance(y, Quantity): add_back_units = True ydata = y.value else: ydata = np.asarray(y) if z is not None: if isinstance(z, Quantity): add_back_units = True zdata = z.value else: zdata = np.asarray(z) # We run the fitting if z is None: model_new = func(self, model, xdata, ydata, **kwargs) else: model_new = func(self, model, xdata, ydata, zdata, **kwargs) # And finally we add back units to the parameters if add_back_units: model_new = model_new.with_units_from_data(**rename_data) return model_new else: raise NotImplementedError( "This model does not support being fit to data with units." ) else: return func(self, model, x, y, z=z, **kwargs) return wrapper class Fitter(metaclass=_FitterMeta): """ Base class for all fitters. Parameters ---------- optimizer : callable A callable implementing an optimization algorithm statistic : callable Statistic function """ supported_constraints = [] def __init__(self, optimizer, statistic): if optimizer is None: raise ValueError("Expected an optimizer.") if statistic is None: raise ValueError("Expected a statistic function.") if inspect.isclass(optimizer): # a callable class self._opt_method = optimizer() elif inspect.isfunction(optimizer): self._opt_method = optimizer else: raise ValueError("Expected optimizer to be a callable class or a function.") if inspect.isclass(statistic): self._stat_method = statistic() else: self._stat_method = statistic def objective_function(self, fps, *args): """ Function to minimize. Parameters ---------- fps : list parameters returned by the fitter args : list [model, [other_args], [input coordinates]] other_args may include weights or any other quantities specific for a statistic Notes ----- The list of arguments (args) is set in the `__call__` method. Fitters may overwrite this method, e.g. when statistic functions require other arguments. """ model = args[0] meas = args[-1] fitter_to_model_params(model, fps) res = self._stat_method(meas, model, *args[1:-1]) return res @staticmethod def _add_fitting_uncertainties(*args): """ When available, calculate and sets the parameter covariance matrix (model.cov_matrix) and standard deviations (model.stds). 
""" return None @abc.abstractmethod def __call__(self): """ This method performs the actual fitting and modifies the parameter list of a model. Fitter subclasses should implement this method. """ raise NotImplementedError("Subclasses should implement this method.") # TODO: I have ongoing branch elsewhere that's refactoring this module so that # all the fitter classes in here are Fitter subclasses. In the meantime we # need to specify that _FitterMeta is its metaclass. class LinearLSQFitter(metaclass=_FitterMeta): """ A class performing a linear least square fitting. Uses `numpy.linalg.lstsq` to do the fitting. Given a model and data, fits the model to the data and changes the model's parameters. Keeps a dictionary of auxiliary fitting information. Notes ----- Note that currently LinearLSQFitter does not support compound models. """ supported_constraints = ["fixed"] supports_masked_input = True def __init__(self, calc_uncertainties=False): self.fit_info = { "residuals": None, "rank": None, "singular_values": None, "params": None, } self._calc_uncertainties = calc_uncertainties @staticmethod def _is_invertible(m): """Check if inverse of matrix can be obtained.""" if m.shape[0] != m.shape[1]: return False if np.linalg.matrix_rank(m) < m.shape[0]: return False return True def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None, resids=None): """ Calculate and parameter covariance matrix and standard deviations and set `cov_matrix` and `stds` attributes. """ x_dot_x_prime = np.dot(a.T, a) masked = False or hasattr(y, "mask") # check if invertible. if not, can't calc covariance. if not self._is_invertible(x_dot_x_prime): return model inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime) if z is None: # 1D models if len(model) == 1: # single model mask = None if masked: mask = y.mask xx = np.ma.array(x, mask=mask) RSS = [(1 / (xx.count() - n_coeff)) * resids] if len(model) > 1: # model sets RSS = [] # collect sum residuals squared for each model in set for j in range(len(model)): mask = None if masked: mask = y.mask[..., j].flatten() xx = np.ma.array(x, mask=mask) eval_y = model(xx, model_set_axis=False) eval_y = np.rollaxis(eval_y, model.model_set_axis)[j] RSS.append( (1 / (xx.count() - n_coeff)) * np.sum((y[..., j] - eval_y) ** 2) ) else: # 2D model if len(model) == 1: mask = None if masked: warnings.warn( "Calculation of fitting uncertainties " "for 2D models with masked values not " "currently supported.\n", AstropyUserWarning, ) return xx, _ = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask) # len(xx) instead of xx.count. this will break if values are masked? RSS = [(1 / (len(xx) - n_coeff)) * resids] else: RSS = [] for j in range(len(model)): eval_z = model(x, y, model_set_axis=False) mask = None # need to figure out how to deal w/ masking here. if model.model_set_axis == 1: # model_set_axis passed when evaluating only refers to input shapes # so output must be reshaped for model_set_axis=1. 
eval_z = np.rollaxis(eval_z, 1) eval_z = eval_z[j] RSS.append( [(1 / (len(x) - n_coeff)) * np.sum((z[j] - eval_z) ** 2)] ) covs = [inv_x_dot_x_prime * r for r in RSS] free_param_names = [ x for x in model.fixed if (model.fixed[x] is False) and (model.tied[x] is False) ] if len(covs) == 1: model.cov_matrix = Covariance(covs[0], model.param_names) model.stds = StandardDeviations(covs[0], free_param_names) else: model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs] model.stds = [StandardDeviations(cov, free_param_names) for cov in covs] @staticmethod def _deriv_with_constraints(model, param_indices, x=None, y=None): if y is None: d = np.array(model.fit_deriv(x, *model.parameters)) else: d = np.array(model.fit_deriv(x, y, *model.parameters)) if model.col_fit_deriv: return d[param_indices] else: return d[..., param_indices] def _map_domain_window(self, model, x, y=None): """ Maps domain into window for a polynomial model which has these attributes. """ if y is None: if hasattr(model, "domain") and model.domain is None: model.domain = [x.min(), x.max()] if hasattr(model, "window") and model.window is None: model.window = [-1, 1] return poly_map_domain(x, model.domain, model.window) else: if hasattr(model, "x_domain") and model.x_domain is None: model.x_domain = [x.min(), x.max()] if hasattr(model, "y_domain") and model.y_domain is None: model.y_domain = [y.min(), y.max()] if hasattr(model, "x_window") and model.x_window is None: model.x_window = [-1.0, 1.0] if hasattr(model, "y_window") and model.y_window is None: model.y_window = [-1.0, 1.0] xnew = poly_map_domain(x, model.x_domain, model.x_window) ynew = poly_map_domain(y, model.y_domain, model.y_window) return xnew, ynew @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, rcond=None): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array Input coordinates y : array-like Input coordinates z : array-like, optional Input coordinates. If the dependent (``y`` or ``z``) coordinate values are provided as a `numpy.ma.MaskedArray`, any masked points are ignored when fitting. Note that model set fitting is significantly slower when there are masked points (not just an empty mask), as the matrix equation has to be solved for each model separately when their coordinate grids differ. weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. rcond : float, optional Cut-off ratio for small singular values of ``a``. Singular values are set to zero if they are smaller than ``rcond`` times the largest singular value of ``a``. equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ if not model.fittable: raise ValueError("Model must be a subclass of FittableModel") if not model.linear: raise ModelLinearityError( "Model is not linear in parameters, " "linear fit methods should not be used." 
) if hasattr(model, "submodel_names"): raise ValueError("Model must be simple, not compound") _validate_constraints(self.supported_constraints, model) model_copy = model.copy() model_copy.sync_constraints = False _, fitparam_indices, _ = model_to_fit_params(model_copy) if model_copy.n_inputs == 2 and z is None: raise ValueError("Expected x, y and z for a 2 dimensional model.") farg = _convert_input( x, y, z, n_models=len(model_copy), model_set_axis=model_copy.model_set_axis ) n_fixed = sum(model_copy.fixed.values()) # This is also done by _convert_inputs, but we need it here to allow # checking the array dimensionality before that gets called: if weights is not None: weights = np.asarray(weights, dtype=float) if n_fixed: # The list of fixed params is the complement of those being fitted: fixparam_indices = [ idx for idx in range(len(model_copy.param_names)) if idx not in fitparam_indices ] # Construct matrix of user-fixed parameters that can be dotted with # the corresponding fit_deriv() terms, to evaluate corrections to # the dependent variable in order to fit only the remaining terms: fixparams = np.asarray( [ getattr(model_copy, model_copy.param_names[idx]).value for idx in fixparam_indices ] ) if len(farg) == 2: x, y = farg if weights is not None: # If we have separate weights for each model, apply the same # conversion as for the data, otherwise check common weights # as if for a single model: _, weights = _convert_input( x, weights, n_models=len(model_copy) if weights.ndim == y.ndim else 1, model_set_axis=model_copy.model_set_axis, ) # map domain into window if hasattr(model_copy, "domain"): x = self._map_domain_window(model_copy, x) if n_fixed: lhs = np.asarray( self._deriv_with_constraints(model_copy, fitparam_indices, x=x) ) fixderivs = self._deriv_with_constraints( model_copy, fixparam_indices, x=x ) else: lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters)) sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x) rhs = y else: x, y, z = farg if weights is not None: # If we have separate weights for each model, apply the same # conversion as for the data, otherwise check common weights # as if for a single model: _, _, weights = _convert_input( x, y, weights, n_models=len(model_copy) if weights.ndim == z.ndim else 1, model_set_axis=model_copy.model_set_axis, ) # map domain into window if hasattr(model_copy, "x_domain"): x, y = self._map_domain_window(model_copy, x, y) if n_fixed: lhs = np.asarray( self._deriv_with_constraints(model_copy, fitparam_indices, x=x, y=y) ) fixderivs = self._deriv_with_constraints( model_copy, fixparam_indices, x=x, y=y ) else: lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters)) sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y) if len(model_copy) > 1: # Just to be explicit (rather than baking in False == 0): model_axis = model_copy.model_set_axis or 0 if z.ndim > 2: # For higher-dimensional z, flatten all the axes except the # dimension along which models are stacked and transpose so # the model axis is *last* (I think this resolves Erik's # pending generalization from 80a6f25a): rhs = np.rollaxis(z, model_axis, z.ndim) rhs = rhs.reshape(-1, rhs.shape[-1]) else: # This "else" seems to handle the corner case where the # user has already flattened x/y before attempting a 2D fit # but z has a second axis for the model set. NB. This is # ~5-10x faster than using rollaxis. 
rhs = z.T if model_axis == 0 else z if weights is not None: # Same for weights if weights.ndim > 2: # Separate 2D weights for each model: weights = np.rollaxis(weights, model_axis, weights.ndim) weights = weights.reshape(-1, weights.shape[-1]) elif weights.ndim == z.ndim: # Separate, flattened weights for each model: weights = weights.T if model_axis == 0 else weights else: # Common weights for all the models: weights = weights.flatten() else: rhs = z.flatten() if weights is not None: weights = weights.flatten() # If the derivative is defined along rows (as with non-linear models) if model_copy.col_fit_deriv: lhs = np.asarray(lhs).T # Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs # when constructing their Vandermonde matrix, which can lead to obscure # failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices, # so just raise a slightly more informative error when this happens: if np.asanyarray(lhs).ndim > 2: raise ValueError( f"{type(model_copy).__name__} gives unsupported >2D " "derivative matrix for this x/y" ) # Subtract any terms fixed by the user from (a copy of) the RHS, in # order to fit the remaining terms correctly: if n_fixed: if model_copy.col_fit_deriv: fixderivs = np.asarray(fixderivs).T # as for lhs above rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms # Subtract any terms implicit in the model from the RHS, which, like # user-fixed terms, affect the dependent variable but are not fitted: if sum_of_implicit_terms is not None: # If we have a model set, the extra axis must be added to # sum_of_implicit_terms as its innermost dimension, to match the # dimensionality of rhs after _convert_input "rolls" it as needed # by np.linalg.lstsq. The vector then gets broadcast to the right # number of sets (columns). This assumes all the models share the # same input coordinates, as is currently the case. if len(model_copy) > 1: sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis] rhs = rhs - sum_of_implicit_terms if weights is not None: if rhs.ndim == 2: if weights.shape == rhs.shape: # separate weights for multiple models case: broadcast # lhs to have more dimension (for each model) lhs = lhs[..., np.newaxis] * weights[:, np.newaxis] rhs = rhs * weights else: lhs *= weights[:, np.newaxis] # Don't modify in-place in case rhs was the original # dependent variable array rhs = rhs * weights[:, np.newaxis] else: lhs *= weights[:, np.newaxis] rhs = rhs * weights scl = (lhs * lhs).sum(0) lhs /= scl masked = np.any(np.ma.getmask(rhs)) if weights is not None and not masked and np.any(np.isnan(lhs)): raise ValueError( "Found NaNs in the coefficient matrix, which " "should not happen and would crash the lapack " "routine. Maybe check that weights are not null." ) a = None # need for calculating covarience if (masked and len(model_copy) > 1) or ( weights is not None and weights.ndim > 1 ): # Separate masks or weights for multiple models case: Numpy's # lstsq supports multiple dimensions only for rhs, so we need to # loop manually on the models. This may be fixed in the future # with https://github.com/numpy/numpy/pull/15777. # Initialize empty array of coefficients and populate it one model # at a time. 
The shape matches the number of coefficients from the # Vandermonde matrix and the number of models from the RHS: lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype) # Arrange the lhs as a stack of 2D matrices that we can iterate # over to get the correctly-orientated lhs for each model: if lhs.ndim > 2: lhs_stack = np.rollaxis(lhs, -1, 0) else: lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape) # Loop over the models and solve for each one. By this point, the # model set axis is the second of two. Transpose rather than using, # say, np.moveaxis(array, -1, 0), since it's slightly faster and # lstsq can't handle >2D arrays anyway. This could perhaps be # optimized by collecting together models with identical masks # (eg. those with no rejected points) into one operation, though it # will still be relatively slow when calling lstsq repeatedly. for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T): # Cull masked points on both sides of the matrix equation: good = ~model_rhs.mask if masked else slice(None) model_lhs = model_lhs[good] model_rhs = model_rhs[good][..., np.newaxis] a = model_lhs # Solve for this model: t_coef, resids, rank, sval = np.linalg.lstsq( model_lhs, model_rhs, rcond ) model_lacoef[:] = t_coef.T else: # If we're fitting one or more models over a common set of points, # we only have to solve a single matrix equation, which is an order # of magnitude faster than calling lstsq() once per model below: good = ~rhs.mask if masked else slice(None) # latter is a no-op a = lhs[good] # Solve for one or more models: lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good], rhs[good], rcond) self.fit_info["residuals"] = resids self.fit_info["rank"] = rank self.fit_info["singular_values"] = sval lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl self.fit_info["params"] = lacoef fitter_to_model_params(model_copy, lacoef.flatten()) # TODO: Only Polynomial models currently have an _order attribute; # maybe change this to read isinstance(model, PolynomialBase) if ( hasattr(model_copy, "_order") and len(model_copy) == 1 and rank < (model_copy._order - n_fixed) ): warnings.warn("The fit may be poorly conditioned\n", AstropyUserWarning) # calculate and set covariance matrix and standard devs. on model if self._calc_uncertainties: if len(y) > len(lacoef): self._add_fitting_uncertainties( model_copy, a * scl, len(lacoef), x, y, z, resids ) model_copy.sync_constraints = True return model_copy class FittingWithOutlierRemoval: """ This class combines an outlier removal technique with a fitting procedure. Basically, given a maximum number of iterations ``niter``, outliers are removed and fitting is performed for each iteration, until no new outliers are found or ``niter`` is reached. Parameters ---------- fitter : `Fitter` An instance of any Astropy fitter, i.e., LinearLSQFitter, LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For model set fitting, this must understand masked input data (as indicated by the fitter class attribute ``supports_masked_input``). outlier_func : callable A function for outlier removal. If this accepts an ``axis`` parameter like the `numpy` functions, the appropriate value will be supplied automatically when fitting model sets (unless overridden in ``outlier_kwargs``), to find outliers for each model separately; otherwise, the same filtering must be performed in a loop over models, which is almost an order of magnitude slower. niter : int, optional Maximum number of iterations. 
outlier_kwargs : dict, optional Keyword arguments for outlier_func. Attributes ---------- fit_info : dict The ``fit_info`` (if any) from the last iteration of the wrapped ``fitter`` during the most recent fit. An entry is also added with the keyword ``niter`` that records the actual number of fitting iterations performed (as opposed to the user-specified maximum). """ def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs): self.fitter = fitter self.outlier_func = outlier_func self.niter = niter self.outlier_kwargs = outlier_kwargs self.fit_info = {"niter": None} def __str__(self): return ( f"Fitter: {self.fitter.__class__.__name__}\n" f"Outlier function: {self.outlier_func.__name__}\n" f"Num. of iterations: {self.niter}\n" f"Outlier func. args.: {self.outlier_kwargs}" ) def __repr__(self): return ( f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, " f"outlier_func: {self.outlier_func.__name__}," f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})" ) def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Parameters ---------- model : `~astropy.modeling.FittableModel` An analytic model which will be fit to the provided data. This also contains the initial guess for an optimization algorithm. x : array-like Input coordinates. y : array-like Data measurements (1D case) or input coordinates (2D case). z : array-like, optional Data measurements (2D case). weights : array-like, optional Weights to be passed to the fitter. kwargs : dict, optional Keyword arguments to be passed to the fitter. Returns ------- fitted_model : `~astropy.modeling.FittableModel` Fitted model after outlier removal. mask : `numpy.ndarray` Boolean mask array, identifying which points were used in the final fitting iteration (False) and which were found to be outliers or were masked in the input (True). """ # For single models, the data get filtered here at each iteration and # then passed to the fitter, which is the historical behavior and # works even for fitters that don't understand masked arrays. For model # sets, the fitter must be able to filter masked data internally, # because fitters require a single set of x/y coordinates whereas the # eliminated points can vary between models. To avoid this limitation, # we could fall back to looping over individual model fits, but it # would likely be fiddly and involve even more overhead (and the # non-linear fitters don't work with model sets anyway, as of writing). 
if len(model) == 1: model_set_axis = None else: if ( not hasattr(self.fitter, "supports_masked_input") or self.fitter.supports_masked_input is not True ): raise ValueError( f"{type(self.fitter).__name__} cannot fit model sets with masked " "values" ) # Fitters use their input model's model_set_axis to determine how # their input data are stacked: model_set_axis = model.model_set_axis # Construct input coordinate tuples for fitters & models that are # appropriate for the dimensionality being fitted: if z is None: coords = (x,) data = y else: coords = x, y data = z # For model sets, construct a numpy-standard "axis" tuple for the # outlier function, to treat each model separately (if supported): if model_set_axis is not None: if model_set_axis < 0: model_set_axis += data.ndim if "axis" not in self.outlier_kwargs: # allow user override # This also works for False (like model instantiation): self.outlier_kwargs["axis"] = tuple( n for n in range(data.ndim) if n != model_set_axis ) loop = False # Starting fit, prior to any iteration and masking: fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs) filtered_data = np.ma.masked_array(data) if filtered_data.mask is np.ma.nomask: filtered_data.mask = False filtered_weights = weights last_n_masked = filtered_data.mask.sum() n = 0 # (allow recording no. of iterations when 0) # Perform the iterative fitting: for n in range(1, self.niter + 1): # (Re-)evaluate the last model: model_vals = fitted_model(*coords, model_set_axis=False) # Determine the outliers: if not loop: # Pass axis parameter if outlier_func accepts it, otherwise # prepare for looping over models: try: filtered_data = self.outlier_func( filtered_data - model_vals, **self.outlier_kwargs ) # If this happens to catch an error with a parameter other # than axis, the next attempt will fail accordingly: except TypeError: if model_set_axis is None: raise else: self.outlier_kwargs.pop("axis", None) loop = True # Construct MaskedArray to hold filtered values: filtered_data = np.ma.masked_array( filtered_data, dtype=np.result_type(filtered_data, model_vals), copy=True, ) # Make sure the mask is an array, not just nomask: if filtered_data.mask is np.ma.nomask: filtered_data.mask = False # Get views transposed appropriately for iteration # over the set (handling data & mask separately due to # NumPy issue #8506): data_T = np.rollaxis(filtered_data, model_set_axis, 0) mask_T = np.rollaxis(filtered_data.mask, model_set_axis, 0) if loop: model_vals_T = np.rollaxis(model_vals, model_set_axis, 0) for row_data, row_mask, row_mod_vals in zip( data_T, mask_T, model_vals_T ): masked_residuals = self.outlier_func( row_data - row_mod_vals, **self.outlier_kwargs ) row_data.data[:] = masked_residuals.data row_mask[:] = masked_residuals.mask # Issue speed warning after the fact, so it only shows up when # the TypeError is genuinely due to the axis argument. 
warnings.warn( "outlier_func did not accept axis argument; " "reverted to slow loop over models.", AstropyUserWarning, ) # Recombine newly-masked residuals with model to get masked values: filtered_data += model_vals # Re-fit the data after filtering, passing masked/unmasked values # for single models / sets, respectively: if model_set_axis is None: good = ~filtered_data.mask if weights is not None: filtered_weights = weights[good] fitted_model = self.fitter( fitted_model, *(c[good] for c in coords), filtered_data.data[good], weights=filtered_weights, **kwargs, ) else: fitted_model = self.fitter( fitted_model, *coords, filtered_data, weights=filtered_weights, **kwargs, ) # Stop iteration if the masked points are no longer changing (with # cumulative rejection we only need to compare how many there are): this_n_masked = filtered_data.mask.sum() # (minimal overhead) if this_n_masked == last_n_masked: break last_n_masked = this_n_masked self.fit_info = {"niter": n} self.fit_info.update(getattr(self.fitter, "fit_info", {})) return fitted_model, filtered_data.mask class _NonLinearLSQFitter(metaclass=_FitterMeta): """ Base class for Non-Linear least-squares fitters. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds : bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. Default: True """ supported_constraints = ["fixed", "tied", "bounds"] """ The constraint types supported by this fitter type. """ def __init__(self, calc_uncertainties=False, use_min_max_bounds=True): self.fit_info = None self._calc_uncertainties = calc_uncertainties self._use_min_max_bounds = use_min_max_bounds super().__init__() def objective_function(self, fps, *args): """ Function to minimize. Parameters ---------- fps : list parameters returned by the fitter args : list [model, [weights], [input coordinates]] """ model = args[0] weights = args[1] fitter_to_model_params(model, fps, self._use_min_max_bounds) meas = args[-1] if weights is None: value = np.ravel(model(*args[2:-1]) - meas) else: value = np.ravel(weights * (model(*args[2:-1]) - meas)) if not np.all(np.isfinite(value)): raise NonFiniteValueError( "Objective function has encountered a non-finite value, " "this will cause the fit to fail!\n" "Please remove non-finite values from your input data before " "fitting to avoid this error." ) return value @staticmethod def _add_fitting_uncertainties(model, cov_matrix): """ Set ``cov_matrix`` and ``stds`` attributes on model with parameter covariance matrix returned by ``optimize.leastsq``. """ free_param_names = [ x for x in model.fixed if (model.fixed[x] is False) and (model.tied[x] is False) ] model.cov_matrix = Covariance(cov_matrix, free_param_names) model.stds = StandardDeviations(cov_matrix, free_param_names) @staticmethod def _wrap_deriv(params, model, weights, x, y, z=None): """ Wraps the method calculating the Jacobian of the function to account for model constraints. `scipy.optimize.leastsq` expects the function derivative to have the above signature (parlist, (argtuple)). In order to accommodate model constraints, instead of using p directly, we set the parameter list in this function. 
""" if weights is None: weights = 1.0 if any(model.fixed.values()) or any(model.tied.values()): # update the parameters with the current values from the fitter fitter_to_model_params(model, params) if z is None: full = np.array(model.fit_deriv(x, *model.parameters)) if not model.col_fit_deriv: full_deriv = np.ravel(weights) * full.T else: full_deriv = np.ravel(weights) * full else: full = np.array( [np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)] ) if not model.col_fit_deriv: full_deriv = np.ravel(weights) * full.T else: full_deriv = np.ravel(weights) * full pars = [getattr(model, name) for name in model.param_names] fixed = [par.fixed for par in pars] tied = [par.tied for par in pars] tied = list(np.where([par.tied is not False for par in pars], True, tied)) fix_and_tie = np.logical_or(fixed, tied) ind = np.logical_not(fix_and_tie) if not model.col_fit_deriv: residues = np.asarray(full_deriv[np.nonzero(ind)]).T else: residues = full_deriv[np.nonzero(ind)] return [np.ravel(_) for _ in residues] else: if z is None: fit_deriv = np.array(model.fit_deriv(x, *params)) try: output = np.array( [np.ravel(_) for _ in np.array(weights) * fit_deriv] ) if output.shape != fit_deriv.shape: output = np.array( [np.ravel(_) for _ in np.atleast_2d(weights).T * fit_deriv] ) return output except ValueError: return np.array( [ np.ravel(_) for _ in np.array(weights) * np.moveaxis(fit_deriv, -1, 0) ] ).transpose() else: if not model.col_fit_deriv: return [ np.ravel(_) for _ in ( np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T ).T ] return [ np.ravel(_) for _ in weights * np.array(model.fit_deriv(x, y, *params)) ] def _compute_param_cov( self, model, y, init_values, cov_x, fitparams, farg, weights=None ): # now try to compute the true covariance matrix if (len(y) > len(init_values)) and cov_x is not None: self.fit_info["param_cov"] = cov_x if weights is None: # if there are no measurement uncertainties given in `weights`, # fall back on the default behavior in scipy.optimize.curve_fit # when `absolute_sigma == False`. If there are uncertainties, # assume they are "absolute" and not "relative". # For details, see curve_fit: # https://github.com/scipy/scipy/blob/ # c1ed5ece8ffbf05356a22a8106affcd11bd3aee0/scipy/ # optimize/_minpack_py.py#L591-L602 sum_sqrs = np.sum(self.objective_function(fitparams, *farg) ** 2) dof = len(y) - len(init_values) self.fit_info["param_cov"] *= sum_sqrs / dof else: self.fit_info["param_cov"] = None if self._calc_uncertainties is True: if self.fit_info["param_cov"] is not None: self._add_fitting_uncertainties(model, self.fit_info["param_cov"]) def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian): return None, None, None def _filter_non_finite(self, x, y, z=None): """ Filter out non-finite values in x, y, z. Returns ------- x, y, z : ndarrays x, y, and z with non-finite values filtered out. """ MESSAGE = "Non-Finite input data has been removed by the fitter." if z is None: mask = np.isfinite(y) if not np.all(mask): warnings.warn(MESSAGE, AstropyUserWarning) return x[mask], y[mask], None else: mask = np.isfinite(z) if not np.all(mask): warnings.warn(MESSAGE, AstropyUserWarning) return x[mask], y[mask], z[mask] @fitter_unit_support def __call__( self, model, x, y, z=None, weights=None, maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC, epsilon=DEFAULT_EPS, estimate_jacobian=False, filter_non_finite=False, ): """ Fit data to this model. 
Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. .. versionchanged:: 5.3 Calculate parameter covariances while accounting for ``weights`` as "absolute" inverse uncertainties. To recover the old behavior, choose ``weights=None``. maxiter : int maximum number of iterations acc : float Relative error desired in the approximate solution epsilon : float A suitable step length for the forward-difference approximation of the Jacobian (if model.fjac=None). If epsfcn is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. estimate_jacobian : bool If False (default) and if the model has a fit_deriv method, it will be used. Otherwise the Jacobian will be estimated. If True, the Jacobian will be estimated in any case. equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. filter_non_finite : bool, optional Whether or not to filter data with non-finite values. Default is False Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self.supported_constraints) model_copy.sync_constraints = False if filter_non_finite: x, y, z = self._filter_non_finite(x, y, z) farg = ( model_copy, weights, ) + _convert_input(x, y, z) init_values, fitparams, cov_x = self._run_fitter( model_copy, farg, maxiter, acc, epsilon, estimate_jacobian ) self._compute_param_cov( model_copy, y, init_values, cov_x, fitparams, farg, weights ) model.sync_constraints = True return model_copy class LevMarLSQFitter(_NonLinearLSQFitter): """ Levenberg-Marquardt algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False Attributes ---------- fit_info : dict The `scipy.optimize.leastsq` result for the most recent fit (see notes). Notes ----- The ``fit_info`` dictionary contains the values returned by `scipy.optimize.leastsq` for the most recent fit, including the values from the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq` documentation for details on the meaning of these values. Note that the ``x`` return value is *not* included (as it is instead the parameter values of the returned model). Additionally, one additional element of ``fit_info`` is computed whenever a model is fit, with the key 'param_cov'. The corresponding value is the covariance matrix of the parameters as a 2D numpy array. The order of the matrix elements matches the order of the parameters in the fitted model (i.e., the same order as ``model.param_names``). 
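
    Examples
    --------
    A minimal, illustrative 1D Gaussian fit (the data below and the use of
    ``calc_uncertainties`` are only an example of typical usage):

    >>> import numpy as np
    >>> from astropy.modeling import models, fitting
    >>> x = np.linspace(-5., 5., 200)
    >>> y = 3 * np.exp(-0.5 * (x - 1.3) ** 2 / 0.8 ** 2)
    >>> fitter = fitting.LevMarLSQFitter(calc_uncertainties=True)
    >>> fitted = fitter(models.Gaussian1D(), x, y)  # doctest: +SKIP
    >>> param_cov = fitter.fit_info['param_cov']  # doctest: +SKIP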
""" def __init__(self, calc_uncertainties=False): super().__init__(calc_uncertainties) self.fit_info = { "nfev": None, "fvec": None, "fjac": None, "ipvt": None, "qtf": None, "message": None, "ierr": None, "param_jac": None, "param_cov": None, } def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian): from scipy import optimize if model.fit_deriv is None or estimate_jacobian: dfunc = None else: dfunc = self._wrap_deriv init_values, _, _ = model_to_fit_params(model) fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq( self.objective_function, init_values, args=farg, Dfun=dfunc, col_deriv=model.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon, xtol=acc, full_output=True, ) fitter_to_model_params(model, fitparams) self.fit_info.update(dinfo) self.fit_info["cov_x"] = cov_x self.fit_info["message"] = mess self.fit_info["ierr"] = ierr if ierr not in [1, 2, 3, 4]: warnings.warn( "The fit may be unsuccessful; check " "fit_info['message'] for more information.", AstropyUserWarning, ) return init_values, fitparams, cov_x class _NLLSQFitter(_NonLinearLSQFitter): """ Wrapper class for `scipy.optimize.least_squares` method, which provides: - Trust Region Reflective - dogbox - Levenberg-Marqueardt algorithms using the least squares statistic. Parameters ---------- method : str ‘trf’ : Trust Region Reflective algorithm, particularly suitable for large sparse problems with bounds. Generally robust method. ‘dogbox’ : dogleg algorithm with rectangular trust regions, typical use case is small problems with bounds. Not recommended for problems with rank-deficient Jacobian. ‘lm’ : Levenberg-Marquardt algorithm as implemented in MINPACK. Doesn’t handle bounds and sparse Jacobians. Usually the most efficient method for small unconstrained problems. calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds: bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. A True setting will replicate how LevMarLSQFitter enforces bounds. Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False): super().__init__(calc_uncertainties, use_min_max_bounds) self._method = method def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian): from scipy import optimize from scipy.linalg import svd if model.fit_deriv is None or estimate_jacobian: dfunc = "2-point" else: def _dfunc(params, model, weights, x, y, z=None): if model.col_fit_deriv: return np.transpose( self._wrap_deriv(params, model, weights, x, y, z) ) else: return self._wrap_deriv(params, model, weights, x, y, z) dfunc = _dfunc init_values, _, bounds = model_to_fit_params(model) # Note, if use_min_max_bounds is True we are defaulting to enforcing bounds # using the old method employed by LevMarLSQFitter, this is different # from the method that optimize.least_squares employs to enforce bounds # thus we override the bounds being passed to optimize.least_squares so # that it will not enforce any bounding. 
if self._use_min_max_bounds: bounds = (-np.inf, np.inf) self.fit_info = optimize.least_squares( self.objective_function, init_values, args=farg, jac=dfunc, max_nfev=maxiter, diff_step=np.sqrt(epsilon), xtol=acc, method=self._method, bounds=bounds, ) # Adapted from ~scipy.optimize.minpack, see: # https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816 # Do Moore-Penrose inverse discarding zero singular values. _, s, VT = svd(self.fit_info.jac, full_matrices=False) threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0] s = s[s > threshold] VT = VT[: s.size] cov_x = np.dot(VT.T / s**2, VT) fitter_to_model_params(model, self.fit_info.x, False) if not self.fit_info.success: warnings.warn( f"The fit may be unsuccessful; check: \n {self.fit_info.message}", AstropyUserWarning, ) return init_values, self.fit_info.x, cov_x class TRFLSQFitter(_NLLSQFitter): """ Trust Region Reflective algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds: bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. A True setting will replicate how LevMarLSQFitter enforces bounds. Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, calc_uncertainties=False, use_min_max_bounds=False): super().__init__("trf", calc_uncertainties, use_min_max_bounds) class DogBoxLSQFitter(_NLLSQFitter): """ DogBox algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False use_min_max_bounds: bool If the set parameter bounds for a model will be enforced each given parameter while fitting via a simple min/max condition. A True setting will replicate how LevMarLSQFitter enforces bounds. Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, calc_uncertainties=False, use_min_max_bounds=False): super().__init__("dogbox", calc_uncertainties, use_min_max_bounds) class LMLSQFitter(_NLLSQFitter): """ `scipy.optimize.least_squares` Levenberg-Marquardt algorithm and least squares statistic. Parameters ---------- calc_uncertainties : bool If the covarience matrix should be computed and set in the fit_info. Default: False Attributes ---------- fit_info : A `scipy.optimize.OptimizeResult` class which contains all of the most recent fit information """ def __init__(self, calc_uncertainties=False): super().__init__("lm", calc_uncertainties, True) class SLSQPLSQFitter(Fitter): """ Sequential Least Squares Programming (SLSQP) optimization algorithm and least squares statistic. Raises ------ ModelLinearityError A linear model is passed to a nonlinear fitter Notes ----- See also the `~astropy.modeling.optimizers.SLSQP` optimizer. """ supported_constraints = SLSQP.supported_constraints def __init__(self): super().__init__(optimizer=SLSQP, statistic=leastsquare) self.fit_info = {} @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Fit data to this model. 
Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. kwargs : dict optional keyword arguments to be passed to the optimizer or the statistic verblevel : int 0-silent 1-print summary upon completion, 2-print summary after each iteration maxiter : int maximum number of iterations epsilon : float the step size for finite-difference derivative estimates acc : float Requested accuracy equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self._opt_method.supported_constraints) model_copy.sync_constraints = False farg = _convert_input(x, y, z) farg = ( model_copy, weights, ) + farg init_values, _, _ = model_to_fit_params(model_copy) fitparams, self.fit_info = self._opt_method( self.objective_function, init_values, farg, **kwargs ) fitter_to_model_params(model_copy, fitparams) model_copy.sync_constraints = True return model_copy class SimplexLSQFitter(Fitter): """ Simplex algorithm and least squares statistic. Raises ------ `ModelLinearityError` A linear model is passed to a nonlinear fitter """ supported_constraints = Simplex.supported_constraints def __init__(self): super().__init__(optimizer=Simplex, statistic=leastsquare) self.fit_info = {} @fitter_unit_support def __call__(self, model, x, y, z=None, weights=None, **kwargs): """ Fit data to this model. Parameters ---------- model : `~astropy.modeling.FittableModel` model to fit to x, y, z x : array input coordinates y : array input coordinates z : array, optional input coordinates weights : array, optional Weights for fitting. For data with Gaussian uncertainties, the weights should be 1/sigma. kwargs : dict optional keyword arguments to be passed to the optimizer or the statistic maxiter : int maximum number of iterations acc : float Relative error in approximate solution equivalencies : list or None, optional, keyword-only List of *additional* equivalencies that are should be applied in case x, y and/or z have units. Default is None. Returns ------- model_copy : `~astropy.modeling.FittableModel` a copy of the input model with parameters set by the fitter """ model_copy = _validate_model(model, self._opt_method.supported_constraints) model_copy.sync_constraints = False farg = _convert_input(x, y, z) farg = ( model_copy, weights, ) + farg init_values, _, _ = model_to_fit_params(model_copy) fitparams, self.fit_info = self._opt_method( self.objective_function, init_values, farg, **kwargs ) fitter_to_model_params(model_copy, fitparams) model_copy.sync_constraints = True return model_copy class JointFitter(metaclass=_FitterMeta): """ Fit models which share a parameter. For example, fit two gaussians to two data sets but keep the FWHM the same. 
Parameters ---------- models : list a list of model instances jointparameters : list a list of joint parameters initvals : list a list of initial values """ def __init__(self, models, jointparameters, initvals): self.models = list(models) self.initvals = list(initvals) self.jointparams = jointparameters self._verify_input() self.fitparams = self.model_to_fit_params() # a list of model.n_inputs self.modeldims = [m.n_inputs for m in self.models] # sum all model dimensions self.ndim = np.sum(self.modeldims) def model_to_fit_params(self): fparams = [] fparams.extend(self.initvals) for model in self.models: params = model.parameters.tolist() joint_params = self.jointparams[model] param_metrics = model._param_metrics for param_name in joint_params: slice_ = param_metrics[param_name]["slice"] del params[slice_] fparams.extend(params) return fparams def objective_function(self, fps, *args): """ Function to minimize. Parameters ---------- fps : list the fitted parameters - result of an one iteration of the fitting algorithm args : dict tuple of measured and input coordinates args is always passed as a tuple from optimize.leastsq """ lstsqargs = list(args) fitted = [] fitparams = list(fps) numjp = len(self.initvals) # make a separate list of the joint fitted parameters jointfitparams = fitparams[:numjp] del fitparams[:numjp] for model in self.models: joint_params = self.jointparams[model] margs = lstsqargs[: model.n_inputs + 1] del lstsqargs[: model.n_inputs + 1] # separate each model separately fitted parameters numfp = len(model._parameters) - len(joint_params) mfparams = fitparams[:numfp] del fitparams[:numfp] # recreate the model parameters mparams = [] param_metrics = model._param_metrics for param_name in model.param_names: if param_name in joint_params: index = joint_params.index(param_name) # should do this with slices in case the # parameter is not a number mparams.extend([jointfitparams[index]]) else: slice_ = param_metrics[param_name]["slice"] plen = slice_.stop - slice_.start mparams.extend(mfparams[:plen]) del mfparams[:plen] modelfit = model.evaluate(margs[:-1], *mparams) fitted.extend(modelfit - margs[-1]) return np.ravel(fitted) def _verify_input(self): if len(self.models) <= 1: raise TypeError(f"Expected >1 models, {len(self.models)} is given") if len(self.jointparams.keys()) < 2: raise TypeError( "At least two parameters are expected, " f"{len(self.jointparams.keys())} is given" ) for j in self.jointparams.keys(): if len(self.jointparams[j]) != len(self.initvals): raise TypeError( f"{len(self.jointparams[j])} parameter(s) " f"provided but {len(self.initvals)} expected" ) def __call__(self, *args): """ Fit data to these models keeping some of the parameters common to the two models. 
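
        For example, to fit two Gaussians to two data sets while keeping
        their widths joint (an illustrative sketch; the data arrays ``x1``,
        ``y1``, ``x2``, ``y2`` are assumed to be defined by the caller)::

            g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
            g2 = models.Gaussian1D(10, mean=13, stddev=.4)
            jf = fitting.JointFitter([g1, g2],
                                     {g1: ['stddev'], g2: ['stddev']},
                                     [0.4])
            jf(x1, y1, x2, y2)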
""" from scipy import optimize if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims): raise ValueError( f"Expected {reduce(lambda x, y: x + 1 + y + 1, self.modeldims)} " f"coordinates in args but {len(args)} provided" ) self.fitparams[:], _ = optimize.leastsq( self.objective_function, self.fitparams, args=args ) fparams = self.fitparams[:] numjp = len(self.initvals) # make a separate list of the joint fitted parameters jointfitparams = fparams[:numjp] del fparams[:numjp] for model in self.models: # extract each model's fitted parameters joint_params = self.jointparams[model] numfp = len(model._parameters) - len(joint_params) mfparams = fparams[:numfp] del fparams[:numfp] # recreate the model parameters mparams = [] param_metrics = model._param_metrics for param_name in model.param_names: if param_name in joint_params: index = joint_params.index(param_name) # should do this with slices in case the parameter # is not a number mparams.extend([jointfitparams[index]]) else: slice_ = param_metrics[param_name]["slice"] plen = slice_.stop - slice_.start mparams.extend(mfparams[:plen]) del mfparams[:plen] model.parameters = np.array(mparams) def _convert_input(x, y, z=None, n_models=1, model_set_axis=0): """Convert inputs to float arrays.""" x = np.asanyarray(x, dtype=float) y = np.asanyarray(y, dtype=float) if z is not None: z = np.asanyarray(z, dtype=float) data_ndim, data_shape = z.ndim, z.shape else: data_ndim, data_shape = y.ndim, y.shape # For compatibility with how the linear fitter code currently expects to # work, shift the dependent variable's axes to the expected locations if n_models > 1 or data_ndim > x.ndim: if (model_set_axis or 0) >= data_ndim: raise ValueError("model_set_axis out of range") if data_shape[model_set_axis] != n_models: raise ValueError( "Number of data sets (y or z array) is expected to equal " "the number of parameter sets" ) if z is None: # For a 1-D model the y coordinate's model-set-axis is expected to # be last, so that its first dimension is the same length as the x # coordinates. This is in line with the expectations of # numpy.linalg.lstsq: # https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html # That is, each model should be represented by a column. TODO: # Obviously this is a detail of np.linalg.lstsq and should be # handled specifically by any fitters that use it... y = np.rollaxis(y, model_set_axis, y.ndim) data_shape = y.shape[:-1] else: # Shape of z excluding model_set_axis data_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1 :] if z is None: if data_shape != x.shape: raise ValueError("x and y should have the same shape") farg = (x, y) else: if not (x.shape == y.shape == data_shape): raise ValueError("x, y and z should have the same shape") farg = (x, y, z) return farg # TODO: These utility functions are really particular to handling # bounds/tied/fixed constraints for scipy.optimize optimizers that do not # support them inherently; this needs to be reworked to be clear about this # distinction (and the fact that these are not necessarily applicable to any # arbitrary fitter--as evidenced for example by the fact that JointFitter has # its own versions of these) # TODO: Most of this code should be entirely rewritten; it should not be as # inefficient as it is. def fitter_to_model_params(model, fps, use_min_max_bounds=True): """ Constructs the full list of model parameters from the fitted and constrained parameters. 
Parameters ---------- model : The model being fit fps : The fit parameter values to be assigned use_min_max_bounds: bool If the set parameter bounds for model will be enforced on each parameter with bounds. Default: True """ _, fit_param_indices, _ = model_to_fit_params(model) has_tied = any(model.tied.values()) has_fixed = any(model.fixed.values()) has_bound = any(b != (None, None) for b in model.bounds.values()) parameters = model.parameters if not (has_tied or has_fixed or has_bound): # We can just assign directly model.parameters = fps return fit_param_indices = set(fit_param_indices) offset = 0 param_metrics = model._param_metrics for idx, name in enumerate(model.param_names): if idx not in fit_param_indices: continue slice_ = param_metrics[name]["slice"] shape = param_metrics[name]["shape"] # This is determining which range of fps (the fitted parameters) maps # to parameters of the model size = reduce(operator.mul, shape, 1) values = fps[offset : offset + size] # Check bounds constraints if model.bounds[name] != (None, None) and use_min_max_bounds: _min, _max = model.bounds[name] if _min is not None: values = np.fmax(values, _min) if _max is not None: values = np.fmin(values, _max) parameters[slice_] = values offset += size # Update model parameters before calling ``tied`` constraints. model._array_to_parameters() # This has to be done in a separate loop due to how tied parameters are # currently evaluated (the fitted parameters need to actually be *set* on # the model first, for use in evaluating the "tied" expression--it might be # better to change this at some point if has_tied: for idx, name in enumerate(model.param_names): if model.tied[name]: value = model.tied[name](model) slice_ = param_metrics[name]["slice"] # To handle multiple tied constraints, model parameters # need to be updated after each iteration. parameters[slice_] = value model._array_to_parameters() @deprecated("5.1", "private method: _fitter_to_model_params has been made public now") def _fitter_to_model_params(model, fps): return fitter_to_model_params(model, fps) def model_to_fit_params(model): """ Convert a model instance's parameter array to an array that can be used with a fitter that doesn't natively support fixed or tied parameters. In particular, it removes fixed/tied parameters from the parameter array. These may be a subset of the model parameters, if some of them are held constant or tied. """ fitparam_indices = list(range(len(model.param_names))) model_params = model.parameters model_bounds = list(model.bounds.values()) if any(model.fixed.values()) or any(model.tied.values()): params = list(model_params) param_metrics = model._param_metrics for idx, name in list(enumerate(model.param_names))[::-1]: if model.fixed[name] or model.tied[name]: slice_ = param_metrics[name]["slice"] del params[slice_] del model_bounds[slice_] del fitparam_indices[idx] model_params = np.array(params) for idx, bound in enumerate(model_bounds): if bound[0] is None: lower = -np.inf else: lower = bound[0] if bound[1] is None: upper = np.inf else: upper = bound[1] model_bounds[idx] = (lower, upper) model_bounds = tuple(zip(*model_bounds)) return model_params, fitparam_indices, model_bounds @deprecated("5.1", "private method: _model_to_fit_params has been made public now") def _model_to_fit_params(model): return model_to_fit_params(model) def _validate_constraints(supported_constraints, model): """Make sure model constraints are supported by the current fitter.""" message = "Optimizer cannot handle {0} constraints." 
if any(model.fixed.values()) and "fixed" not in supported_constraints: raise UnsupportedConstraintError(message.format("fixed parameter")) if any(model.tied.values()) and "tied" not in supported_constraints: raise UnsupportedConstraintError(message.format("tied parameter")) if ( any(tuple(b) != (None, None) for b in model.bounds.values()) and "bounds" not in supported_constraints ): raise UnsupportedConstraintError(message.format("bound parameter")) if model.eqcons and "eqcons" not in supported_constraints: raise UnsupportedConstraintError(message.format("equality")) if model.ineqcons and "ineqcons" not in supported_constraints: raise UnsupportedConstraintError(message.format("inequality")) def _validate_model(model, supported_constraints): """ Check that model and fitter are compatible and return a copy of the model. """ if not model.fittable: raise ValueError("Model does not appear to be fittable.") if model.linear: warnings.warn( "Model is linear in parameters; consider using linear fitting methods.", AstropyUserWarning, ) elif len(model) != 1: # for now only single data sets ca be fitted raise ValueError("Non-linear fitters can only fit one data set at a time.") _validate_constraints(supported_constraints, model) model_copy = model.copy() return model_copy def populate_entry_points(entry_points): """ This injects entry points into the `astropy.modeling.fitting` namespace. This provides a means of inserting a fitting routine without requirement of it being merged into astropy's core. Parameters ---------- entry_points : list of `~importlib.metadata.EntryPoint` entry_points are objects which encapsulate importable objects and are defined on the installation of a package. Notes ----- An explanation of entry points can be found `here <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_ """ for entry_point in entry_points: name = entry_point.name try: entry_point = entry_point.load() except Exception as e: # This stops the fitting from choking if an entry_point produces an error. warnings.warn( AstropyUserWarning( f"{type(e).__name__} error occurred in entry point {name}." ) ) else: if not inspect.isclass(entry_point): warnings.warn( AstropyUserWarning( f"Modeling entry point {name} expected to be a Class." ) ) else: if issubclass(entry_point, Fitter): name = entry_point.__name__ globals()[name] = entry_point __all__.append(name) else: warnings.warn( AstropyUserWarning( f"Modeling entry point {name} expected to extend " "astropy.modeling.Fitter" ) ) def _populate_ep(): # TODO: Exclusively use select when Python minversion is 3.10 ep = entry_points() if hasattr(ep, "select"): populate_entry_points(ep.select(group="astropy.modeling")) else: populate_entry_points(ep.get("astropy.modeling", [])) _populate_ep()
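

# Example (illustrative; the package and class names below are hypothetical):
# a third-party package can expose a custom fitter through the
# ``astropy.modeling`` entry-point group so that ``populate_entry_points``
# injects it into this namespace.  The package would define a ``Fitter``
# subclass::
#
#     from astropy.modeling.fitting import Fitter
#
#     class MyFitter(Fitter):
#         supported_constraints = []
#         ...
#
# and advertise it in its packaging metadata::
#
#     [project.entry-points."astropy.modeling"]
#     MyFitter = "mypackage.fitters:MyFitter"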
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module defines base classes for all models. The base class of all models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is the base class for all fittable models. Fittable models can be linear or nonlinear in a regression analysis sense. All models provide a `__call__` method which performs the transformation in a purely mathematical way, i.e. the models are unitless. Model instances can represent either a single model, or a "model set" representing multiple copies of the same type of model, but with potentially different values of the parameters in each model making up the set. """ # pylint: disable=invalid-name, protected-access, redefined-outer-name import abc import copy import functools import inspect import itertools import operator import types from collections import defaultdict, deque from inspect import signature from itertools import chain import numpy as np from astropy.nddata.utils import add_array, extract_array from astropy.table import Table from astropy.units import Quantity, UnitsError, dimensionless_unscaled from astropy.units.utils import quantity_asanyarray from astropy.utils import ( IncompatibleShapeError, check_broadcast, find_current_module, indent, isiterable, metadata, sharedmethod, ) from astropy.utils.codegen import make_function_with_signature from .bounding_box import CompoundBoundingBox, ModelBoundingBox from .parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline from .utils import ( _combine_equivalency_dict, _ConstraintsDict, _SpecialOperatorsDict, combine_labels, get_inputs_and_params, make_binary_operator_eval, ) __all__ = [ "Model", "FittableModel", "Fittable1DModel", "Fittable2DModel", "CompoundModel", "fix_inputs", "custom_model", "ModelDefinitionError", "bind_bounding_box", "bind_compound_bounding_box", ] def _model_oper(oper, **kwargs): """ Returns a function that evaluates a given Python arithmetic operator between two models. The operator should be given as a string, like ``'+'`` or ``'**'``. """ return lambda left, right: CompoundModel(oper, left, right, **kwargs) class ModelDefinitionError(TypeError): """Used for incorrect models definitions.""" class _ModelMeta(abc.ABCMeta): """ Metaclass for Model. Currently just handles auto-generating the param_names list based on Parameter descriptors declared at the class-level of Model subclasses. """ _is_dynamic = False """ This flag signifies whether this class was created in the "normal" way, with a class statement in the body of a module, as opposed to a call to `type` or some other metaclass constructor, such that the resulting class does not belong to a specific module. This is important for pickling of dynamic classes. This flag is always forced to False for new classes, so code that creates dynamic classes should manually set it to True on those classes when creating them. 
""" # Default empty dict for _parameters_, which will be empty on model # classes that don't have any Parameters def __new__(mcls, name, bases, members, **kwds): # See the docstring for _is_dynamic above if "_is_dynamic" not in members: members["_is_dynamic"] = mcls._is_dynamic opermethods = [ ("__add__", _model_oper("+")), ("__sub__", _model_oper("-")), ("__mul__", _model_oper("*")), ("__truediv__", _model_oper("/")), ("__pow__", _model_oper("**")), ("__or__", _model_oper("|")), ("__and__", _model_oper("&")), ("_fix_inputs", _model_oper("fix_inputs")), ] members["_parameters_"] = { k: v for k, v in members.items() if isinstance(v, Parameter) } for opermethod, opercall in opermethods: members[opermethod] = opercall cls = super().__new__(mcls, name, bases, members, **kwds) param_names = list(members["_parameters_"]) # Need to walk each base MRO to collect all parameter names for base in bases: for tbase in base.__mro__: if issubclass(tbase, Model): # Preserve order of definitions param_names = list(tbase._parameters_) + param_names # Remove duplicates (arising from redefinition in subclass). param_names = list(dict.fromkeys(param_names)) if cls._parameters_: if hasattr(cls, "_param_names"): # Slight kludge to support compound models, where # cls.param_names is a property; could be improved with a # little refactoring but fine for now cls._param_names = tuple(param_names) else: cls.param_names = tuple(param_names) return cls def __init__(cls, name, bases, members, **kwds): super().__init__(name, bases, members, **kwds) cls._create_inverse_property(members) cls._create_bounding_box_property(members) pdict = {} for base in bases: for tbase in base.__mro__: if issubclass(tbase, Model): for parname, val in cls._parameters_.items(): pdict[parname] = val cls._handle_special_methods(members, pdict) def __repr__(cls): """ Custom repr for Model subclasses. """ return cls._format_cls_repr() def _repr_pretty_(cls, p, cycle): """ Repr for IPython's pretty printer. By default IPython "pretty prints" classes, so we need to implement this so that IPython displays the custom repr for Models. """ p.text(repr(cls)) def __reduce__(cls): if not cls._is_dynamic: # Just return a string specifying where the class can be imported # from return cls.__name__ members = dict(cls.__dict__) # Delete any ABC-related attributes--these will be restored when # the class is reconstructed: for key in list(members): if key.startswith("_abc_"): del members[key] # Delete custom __init__ and __call__ if they exist: for key in ("__init__", "__call__"): if key in members: del members[key] return (type(cls), (cls.__name__, cls.__bases__, members)) @property def name(cls): """ The name of this model class--equivalent to ``cls.__name__``. This attribute is provided for symmetry with the `Model.name` attribute of model instances. """ return cls.__name__ @property def _is_concrete(cls): """ A class-level property that determines whether the class is a concrete implementation of a Model--i.e. it is not some abstract base class or internal implementation detail (i.e. begins with '_'). """ return not (cls.__name__.startswith("_") or inspect.isabstract(cls)) def rename(cls, name=None, inputs=None, outputs=None): """ Creates a copy of this model class with a new name, inputs or outputs. The new class is technically a subclass of the original class, so that instance and type checks will still work. 
For example:: >>> from astropy.modeling.models import Rotation2D >>> SkyRotation = Rotation2D.rename('SkyRotation') >>> SkyRotation <class 'astropy.modeling.core.SkyRotation'> Name: SkyRotation (Rotation2D) N_inputs: 2 N_outputs: 2 Fittable parameters: ('angle',) >>> issubclass(SkyRotation, Rotation2D) True >>> r = SkyRotation(90) >>> isinstance(r, Rotation2D) True """ mod = find_current_module(2) if mod: modname = mod.__name__ else: modname = "__main__" if name is None: name = cls.name if inputs is None: inputs = cls.inputs else: if not isinstance(inputs, tuple): raise TypeError("Expected 'inputs' to be a tuple of strings.") elif len(inputs) != len(cls.inputs): raise ValueError(f"{cls.name} expects {len(cls.inputs)} inputs") if outputs is None: outputs = cls.outputs else: if not isinstance(outputs, tuple): raise TypeError("Expected 'outputs' to be a tuple of strings.") elif len(outputs) != len(cls.outputs): raise ValueError(f"{cls.name} expects {len(cls.outputs)} outputs") new_cls = type(name, (cls,), {"inputs": inputs, "outputs": outputs}) new_cls.__module__ = modname new_cls.__qualname__ = name return new_cls def _create_inverse_property(cls, members): inverse = members.get("inverse") if inverse is None or cls.__bases__[0] is object: # The latter clause is the prevent the below code from running on # the Model base class, which implements the default getter and # setter for .inverse return if isinstance(inverse, property): # We allow the @property decorator to be omitted entirely from # the class definition, though its use should be encouraged for # clarity inverse = inverse.fget # Store the inverse getter internally, then delete the given .inverse # attribute so that cls.inverse resolves to Model.inverse instead cls._inverse = inverse del cls.inverse def _create_bounding_box_property(cls, members): """ Takes any bounding_box defined on a concrete Model subclass (either as a fixed tuple or a property or method) and wraps it in the generic getter/setter interface for the bounding_box attribute. """ # TODO: Much of this is verbatim from _create_inverse_property--I feel # like there could be a way to generify properties that work this way, # but for the time being that would probably only confuse things more. bounding_box = members.get("bounding_box") if bounding_box is None or cls.__bases__[0] is object: return if isinstance(bounding_box, property): bounding_box = bounding_box.fget if not callable(bounding_box): # See if it's a hard-coded bounding_box (as a sequence) and # normalize it try: bounding_box = ModelBoundingBox.validate( cls, bounding_box, _preserve_ignore=True ) except ValueError as exc: raise ModelDefinitionError(exc.args[0]) else: sig = signature(bounding_box) # May be a method that only takes 'self' as an argument (like a # property, but the @property decorator was forgotten) # # However, if the method takes additional arguments then this is a # parameterized bounding box and should be callable if len(sig.parameters) > 1: bounding_box = cls._create_bounding_box_subclass(bounding_box, sig) # See the Model.bounding_box getter definition for how this attribute # is used cls._bounding_box = bounding_box del cls.bounding_box def _create_bounding_box_subclass(cls, func, sig): """ For Models that take optional arguments for defining their bounding box, we create a subclass of ModelBoundingBox with a ``__call__`` method that supports those additional arguments. 
Takes the function's Signature as an argument since that is already computed in _create_bounding_box_property, so no need to duplicate that effort. """ # TODO: Might be convenient if calling the bounding box also # automatically sets the _user_bounding_box. So that # # >>> model.bounding_box(arg=1) # # in addition to returning the computed bbox, also sets it, so that # it's a shortcut for # # >>> model.bounding_box = model.bounding_box(arg=1) # # Not sure if that would be non-obvious / confusing though... def __call__(self, **kwargs): return func(self._model, **kwargs) kwargs = [] for idx, param in enumerate(sig.parameters.values()): if idx == 0: # Presumed to be a 'self' argument continue if param.default is param.empty: raise ModelDefinitionError( f"The bounding_box method for {cls.name} is not correctly " "defined: If defined as a method all arguments to that " "method (besides self) must be keyword arguments with " "default values that can be used to compute a default " "bounding box." ) kwargs.append((param.name, param.default)) __call__.__signature__ = sig return type( f"{cls.name}ModelBoundingBox", (ModelBoundingBox,), {"__call__": __call__} ) def _handle_special_methods(cls, members, pdict): # Handle init creation from inputs def update_wrapper(wrapper, cls): # Set up the new __call__'s metadata attributes as though it were # manually defined in the class definition # A bit like functools.update_wrapper but uses the class instead of # the wrapped function wrapper.__module__ = cls.__module__ wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__ if hasattr(cls, "__qualname__"): wrapper.__qualname__ = f"{cls.__qualname__}.{wrapper.__name__}" if ( "__call__" not in members and "n_inputs" in members and isinstance(members["n_inputs"], int) and members["n_inputs"] > 0 ): # Don't create a custom __call__ for classes that already have one # explicitly defined (this includes the Model base class, and any # other classes that manually override __call__ def __call__(self, *inputs, **kwargs): """Evaluate this model on the supplied inputs.""" return super(cls, self).__call__(*inputs, **kwargs) # When called, models can take two optional keyword arguments: # # * model_set_axis, which indicates (for multi-dimensional input) # which axis is used to indicate different models # # * equivalencies, a dictionary of equivalencies to be applied to # the input values, where each key should correspond to one of # the inputs. # # The following code creates the __call__ function with these # two keyword arguments. args = ("self",) kwargs = { "model_set_axis": None, "with_bounding_box": False, "fill_value": np.nan, "equivalencies": None, "inputs_map": None, } new_call = make_function_with_signature( __call__, args, kwargs, varargs="inputs", varkwargs="new_inputs" ) # The following makes it look like __call__ # was defined in the class update_wrapper(new_call, cls) cls.__call__ = new_call if ( "__init__" not in members and not inspect.isabstract(cls) and cls._parameters_ ): # Build list of all parameters including inherited ones # If *all* the parameters have default values we can make them # keyword arguments; otherwise they must all be positional # arguments if all(p.default is not None for p in pdict.values()): args = ("self",) kwargs = [] for param_name, param_val in pdict.items(): default = param_val.default unit = param_val.unit # If the unit was specified in the parameter but the # default is not a Quantity, attach the unit to the # default. 
if unit is not None: default = Quantity(default, unit, copy=False, subok=True) kwargs.append((param_name, default)) else: args = ("self",) + tuple(pdict.keys()) kwargs = {} def __init__(self, *params, **kwargs): return super(cls, self).__init__(*params, **kwargs) new_init = make_function_with_signature( __init__, args, kwargs, varkwargs="kwargs" ) update_wrapper(new_init, cls) cls.__init__ = new_init # *** Arithmetic operators for creating compound models *** __add__ = _model_oper("+") __sub__ = _model_oper("-") __mul__ = _model_oper("*") __truediv__ = _model_oper("/") __pow__ = _model_oper("**") __or__ = _model_oper("|") __and__ = _model_oper("&") _fix_inputs = _model_oper("fix_inputs") # *** Other utilities *** def _format_cls_repr(cls, keywords=[]): """ Internal implementation of ``__repr__``. This is separated out for ease of use by subclasses that wish to override the default ``__repr__`` while keeping the same basic formatting. """ # For the sake of familiarity start the output with the standard class # __repr__ parts = [super().__repr__()] if not cls._is_concrete: return parts[0] def format_inheritance(cls): bases = [] for base in cls.mro()[1:]: if not issubclass(base, Model): continue elif inspect.isabstract(base) or base.__name__.startswith("_"): break bases.append(base.name) if bases: return f"{cls.name} ({' -> '.join(bases)})" return cls.name try: default_keywords = [ ("Name", format_inheritance(cls)), ("N_inputs", cls.n_inputs), ("N_outputs", cls.n_outputs), ] if cls.param_names: default_keywords.append(("Fittable parameters", cls.param_names)) for keyword, value in default_keywords + keywords: if value is not None: parts.append(f"{keyword}: {value}") return "\n".join(parts) except Exception: # If any of the above formatting fails fall back on the basic repr # (this is particularly useful in debugging) return parts[0] class Model(metaclass=_ModelMeta): """ Base class for all models. This is an abstract class and should not be instantiated directly. The following initialization arguments apply to the majority of Model subclasses by default (exceptions include specialized utility models like `~astropy.modeling.mappings.Mapping`). Parametric models take all their parameters as arguments, followed by any of the following optional keyword arguments: Parameters ---------- name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict, optional An optional dict of user-defined metadata to attach to this model. How this is used and interpreted is up to the user or individual use case. n_models : int, optional If given an integer greater than 1, a *model set* is instantiated instead of a single model. This affects how the parameter arguments are interpreted. In this case each parameter must be given as a list or array--elements of this array are taken along the first axis (or ``model_set_axis`` if specified), such that the Nth element is the value of that parameter for the Nth model in the set. See the section on model sets in the documentation for more details. model_set_axis : int, optional This argument only applies when creating a model set (i.e. ``n_models > 1``). It changes how parameter values are interpreted. Normally the first axis of each input parameter array (properly the 0th axis) is taken as the axis corresponding to the model sets. However, any axis of an input array may be taken as this "model set axis". 
This accepts negative integers as well--for example use ``model_set_axis=-1`` if the last (most rapidly changing) axis should be associated with the model sets. Also, ``model_set_axis=False`` can be used to tell that a given input should be used to evaluate all the models in the model set. fixed : dict, optional Dictionary ``{parameter_name: bool}`` setting the fixed constraint for one or more parameters. `True` means the parameter is held fixed during fitting and is prevented from updates once an instance of the model has been created. Alternatively the `~astropy.modeling.Parameter.fixed` property of a parameter may be used to lock or unlock individual parameters. tied : dict, optional Dictionary ``{parameter_name: callable}`` of parameters which are linked to some other parameter. The dictionary values are callables providing the linking relationship. Alternatively the `~astropy.modeling.Parameter.tied` property of a parameter may be used to set the ``tied`` constraint on individual parameters. bounds : dict, optional A dictionary ``{parameter_name: value}`` of lower and upper bounds of parameters. Keys are parameter names. Values are a list or a tuple of length 2 giving the desired range for the parameter. Alternatively the `~astropy.modeling.Parameter.min` and `~astropy.modeling.Parameter.max` or ~astropy.modeling.Parameter.bounds` properties of a parameter may be used to set bounds on individual parameters. eqcons : list, optional List of functions of length n such that ``eqcons[j](x0, *args) == 0.0`` in a successfully optimized problem. ineqcons : list, optional List of functions of length n such that ``ieqcons[j](x0, *args) >= 0.0`` is a successfully optimized problem. Examples -------- >>> from astropy.modeling import models >>> def tie_center(model): ... mean = 50 * model.stddev ... return mean >>> tied_parameters = {'mean': tie_center} Specify that ``'mean'`` is a tied parameter in one of two ways: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... tied=tied_parameters) or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.mean.tied False >>> g1.mean.tied = tie_center >>> g1.mean.tied <function tie_center at 0x...> Fixed parameters: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... fixed={'stddev': True}) >>> g1.stddev.fixed True or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.stddev.fixed False >>> g1.stddev.fixed = True >>> g1.stddev.fixed True """ parameter_constraints = Parameter.constraints """ Primarily for informational purposes, these are the types of constraints that can be set on a model's parameters. """ model_constraints = ("eqcons", "ineqcons") """ Primarily for informational purposes, these are the types of constraints that constrain model evaluation. """ param_names = () """ Names of the parameters that describe models of this type. The parameters in this tuple are in the same order they should be passed in when initializing a model of a specific type. Some types of models, such as polynomial models, have a different number of parameters depending on some other property of the model, such as the degree. When defining a custom model class the value of this attribute is automatically set by the `~astropy.modeling.Parameter` attributes defined in the class body. 
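
    For example (illustrative)::

        >>> from astropy.modeling import models
        >>> models.Gaussian1D.param_names
        ('amplitude', 'mean', 'stddev')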
""" n_inputs = 0 """The number of inputs.""" n_outputs = 0 """ The number of outputs.""" standard_broadcasting = True fittable = False linear = True _separable = None """ A boolean flag to indicate whether a model is separable.""" meta = metadata.MetaData() """A dict-like object to store optional information.""" # By default models either use their own inverse property or have no # inverse at all, but users may also assign a custom inverse to a model, # optionally; in that case it is of course up to the user to determine # whether their inverse is *actually* an inverse to the model they assign # it to. _inverse = None _user_inverse = None _bounding_box = None _user_bounding_box = None _has_inverse_bounding_box = False # Default n_models attribute, so that __len__ is still defined even when a # model hasn't completed initialization yet _n_models = 1 # New classes can set this as a boolean value. # It is converted to a dictionary mapping input name to a boolean value. _input_units_strict = False # Allow dimensionless input (and corresponding output). If this is True, # input values to evaluate will gain the units specified in input_units. If # this is a dictionary then it should map input name to a bool to allow # dimensionless numbers for that input. # Only has an effect if input_units is defined. _input_units_allow_dimensionless = False # Default equivalencies to apply to input values. If set, this should be a # dictionary where each key is a string that corresponds to one of the # model inputs. Only has an effect if input_units is defined. input_units_equivalencies = None # Covariance matrix can be set by fitter if available. # If cov_matrix is available, then std will set as well _cov_matrix = None _stds = None def __init_subclass__(cls, **kwargs): super().__init_subclass__() def __init__(self, *args, meta=None, name=None, **kwargs): super().__init__() self._default_inputs_outputs() if meta is not None: self.meta = meta self._name = name # add parameters to instance level by walking MRO list mro = self.__class__.__mro__ for cls in mro: if issubclass(cls, Model): for parname, val in cls._parameters_.items(): newpar = copy.deepcopy(val) newpar.model = self if parname not in self.__dict__: self.__dict__[parname] = newpar self._initialize_constraints(kwargs) kwargs = self._initialize_setters(kwargs) # Remaining keyword args are either parameter values or invalid # Parameter values must be passed in as keyword arguments in order to # distinguish them self._initialize_parameters(args, kwargs) self._initialize_slices() self._initialize_unit_support() def _default_inputs_outputs(self): if self.n_inputs == 1 and self.n_outputs == 1: self._inputs = ("x",) self._outputs = ("y",) elif self.n_inputs == 2 and self.n_outputs == 1: self._inputs = ("x", "y") self._outputs = ("z",) else: try: self._inputs = tuple("x" + str(idx) for idx in range(self.n_inputs)) self._outputs = tuple("x" + str(idx) for idx in range(self.n_outputs)) except TypeError: # self.n_inputs and self.n_outputs are properties # This is the case when subclasses of Model do not define # ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``. self._inputs = () self._outputs = () def _initialize_setters(self, kwargs): """ This exists to inject defaults for settable properties for models originating from `custom_model`. 
""" if hasattr(self, "_settable_properties"): setters = { name: kwargs.pop(name, default) for name, default in self._settable_properties.items() } for name, value in setters.items(): setattr(self, name, value) return kwargs @property def inputs(self): return self._inputs @inputs.setter def inputs(self, val): if len(val) != self.n_inputs: raise ValueError( f"Expected {self.n_inputs} number of inputs, got {len(val)}." ) self._inputs = val self._initialize_unit_support() @property def outputs(self): return self._outputs @outputs.setter def outputs(self, val): if len(val) != self.n_outputs: raise ValueError( f"Expected {self.n_outputs} number of outputs, got {len(val)}." ) self._outputs = val @property def n_inputs(self): # TODO: remove the code in the ``if`` block when support # for models with ``inputs`` as class variables is removed. if hasattr(self.__class__, "n_inputs") and isinstance( self.__class__.n_inputs, property ): try: return len(self.__class__.inputs) except TypeError: try: return len(self.inputs) except AttributeError: return 0 return self.__class__.n_inputs @property def n_outputs(self): # TODO: remove the code in the ``if`` block when support # for models with ``outputs`` as class variables is removed. if hasattr(self.__class__, "n_outputs") and isinstance( self.__class__.n_outputs, property ): try: return len(self.__class__.outputs) except TypeError: try: return len(self.outputs) except AttributeError: return 0 return self.__class__.n_outputs def _calculate_separability_matrix(self): """ This is a hook which customises the behavior of modeling.separable. This allows complex subclasses to customise the separability matrix. If it returns `NotImplemented` the default behavior is used. """ return NotImplemented def _initialize_unit_support(self): """ Convert self._input_units_strict and self.input_units_allow_dimensionless to dictionaries mapping input name to a boolean value. """ if isinstance(self._input_units_strict, bool): self._input_units_strict = { key: self._input_units_strict for key in self.inputs } if isinstance(self._input_units_allow_dimensionless, bool): self._input_units_allow_dimensionless = { key: self._input_units_allow_dimensionless for key in self.inputs } @property def input_units_strict(self): """ Enforce strict units on inputs to evaluate. If this is set to True, input values to evaluate will be in the exact units specified by input_units. If the input quantities are convertible to input_units, they are converted. If this is a dictionary then it should map input name to a bool to set strict input units for that parameter. """ val = self._input_units_strict if isinstance(val, bool): return {key: val for key in self.inputs} return dict(zip(self.inputs, val.values())) @property def input_units_allow_dimensionless(self): """ Allow dimensionless input (and corresponding output). If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. Only has an effect if input_units is defined. """ val = self._input_units_allow_dimensionless if isinstance(val, bool): return {key: val for key in self.inputs} return dict(zip(self.inputs, val.values())) @property def uses_quantity(self): """ True if this model has been created with `~astropy.units.Quantity` objects or if there are no parameters. This can be used to determine if this model should be evaluated with `~astropy.units.Quantity` or regular floats. 
""" pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)] return (len(pisq) == 0) or any(pisq) def __repr__(self): return self._format_repr() def __str__(self): return self._format_str() def __len__(self): return self._n_models @staticmethod def _strip_ones(intup): return tuple(item for item in intup if item != 1) def __setattr__(self, attr, value): if isinstance(self, CompoundModel): param_names = self._param_names param_names = self.param_names if param_names is not None and attr in self.param_names: param = self.__dict__[attr] value = _tofloat(value) if param._validator is not None: param._validator(self, value) # check consistency with previous shape and size eshape = self._param_metrics[attr]["shape"] if eshape == (): eshape = (1,) vshape = np.array(value).shape if vshape == (): vshape = (1,) esize = self._param_metrics[attr]["size"] if np.size(value) != esize or self._strip_ones(vshape) != self._strip_ones( eshape ): raise InputParameterError( f"Value for parameter {attr} does not match shape or size\nexpected" f" by model ({vshape}, {np.size(value)}) vs ({eshape}, {esize})" ) if param.unit is None: if isinstance(value, Quantity): param._unit = value.unit param.value = value.value else: param.value = value else: if not isinstance(value, Quantity): raise UnitsError( f"The '{param.name}' parameter should be given as a" " Quantity because it was originally " "initialized as a Quantity" ) param._unit = value.unit param.value = value.value else: if attr in ["fittable", "linear"]: self.__dict__[attr] = value else: super().__setattr__(attr, value) def _pre_evaluate(self, *args, **kwargs): """ Model specific input setup that needs to occur prior to model evaluation. """ # Broadcast inputs into common size inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs) # Setup actual model evaluation method parameters = self._param_sets(raw=True, units=True) def evaluate(_inputs): return self.evaluate(*chain(_inputs, parameters)) return evaluate, inputs, broadcasted_shapes, kwargs def get_bounding_box(self, with_bbox=True): """ Return the ``bounding_box`` of a model if it exists or ``None`` otherwise. Parameters ---------- with_bbox : The value of the ``with_bounding_box`` keyword argument when calling the model. Default is `True` for usage when looking up the model's ``bounding_box`` without risk of error. """ bbox = None if not isinstance(with_bbox, bool) or with_bbox: try: bbox = self.bounding_box except NotImplementedError: pass if isinstance(bbox, CompoundBoundingBox) and not isinstance( with_bbox, bool ): bbox = bbox[with_bbox] return bbox @property def _argnames(self): """The inputs used to determine input_shape for bounding_box evaluation.""" return self.inputs def _validate_input_shape( self, _input, idx, argnames, model_set_axis, check_model_set_axis ): """Perform basic validation of a single model input's shape. The shape has the minimum dimensions for the given model_set_axis. Returns the shape of the input if validation succeeds. """ input_shape = np.shape(_input) # Ensure that the input's model_set_axis matches the model's # n_models if input_shape and check_model_set_axis: # Note: Scalar inputs *only* get a pass on this if len(input_shape) < model_set_axis + 1: raise ValueError( f"For model_set_axis={model_set_axis}, all inputs must be at " f"least {model_set_axis + 1}-dimensional." 
) if input_shape[model_set_axis] != self._n_models: try: argname = argnames[idx] except IndexError: # the case of model.inputs = () argname = str(idx) raise ValueError( f"Input argument '{argname}' does not have the correct dimensions" f" in model_set_axis={model_set_axis} for a model set with" f" n_models={self._n_models}." ) return input_shape def _validate_input_shapes(self, inputs, argnames, model_set_axis): """ Perform basic validation of model inputs --that they are mutually broadcastable and that they have the minimum dimensions for the given model_set_axis. If validation succeeds, returns the total shape that will result from broadcasting the input arrays with each other. """ check_model_set_axis = self._n_models > 1 and model_set_axis is not False all_shapes = [] for idx, _input in enumerate(inputs): all_shapes.append( self._validate_input_shape( _input, idx, argnames, model_set_axis, check_model_set_axis ) ) input_shape = check_broadcast(*all_shapes) if input_shape is None: raise ValueError( "All inputs must have identical shapes or must be scalars." ) return input_shape def input_shape(self, inputs): """Get input shape for bounding_box evaluation.""" return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis) def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox): """Generic model evaluation routine. Selects and evaluates model with or without bounding_box enforcement. """ # Evaluate the model using the prepared evaluation method either # enforcing the bounding_box or not. bbox = self.get_bounding_box(with_bbox) if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None: outputs = bbox.evaluate(evaluate, _inputs, fill_value) else: outputs = evaluate(_inputs) return outputs def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs): """ Model specific post evaluation processing of outputs. """ if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1: outputs = (outputs,) outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs) outputs = self._process_output_units(inputs, outputs) if self.n_outputs == 1: return outputs[0] return outputs @property def bbox_with_units(self): return not isinstance(self, CompoundModel) def __call__(self, *args, **kwargs): """ Evaluate this model using the given input(s) and the parameter values that were specified when the model was instantiated. """ # Turn any keyword arguments into positional arguments. args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs) # Read model evaluation related parameters with_bbox = kwargs.pop("with_bounding_box", False) fill_value = kwargs.pop("fill_value", np.nan) # prepare for model evaluation (overridden in CompoundModel) evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate( *args, **kwargs ) outputs = self._generic_evaluate(evaluate, inputs, fill_value, with_bbox) # post-process evaluation results (overridden in CompoundModel) return self._post_evaluate( inputs, outputs, broadcasted_shapes, with_bbox, **kwargs ) def _get_renamed_inputs_as_positional(self, *args, **kwargs): def _keyword2positional(kwargs): # Inputs were passed as keyword (not positional) arguments. # Because the signature of the ``__call__`` is defined at # the class level, the name of the inputs cannot be changed at # the instance level and the old names are always present in the # signature of the method. 
In order to use the new names of the # inputs, the old names are taken out of ``kwargs``, the input # values are sorted in the order of self.inputs and passed as # positional arguments to ``__call__``. # These are the keys that are always present as keyword arguments. keys = [ "model_set_axis", "with_bounding_box", "fill_value", "equivalencies", "inputs_map", ] new_inputs = {} # kwargs contain the names of the new inputs + ``keys`` allkeys = list(kwargs.keys()) # Remove the names of the new inputs from kwargs and save them # to a dict ``new_inputs``. for key in allkeys: if key not in keys: new_inputs[key] = kwargs[key] del kwargs[key] return new_inputs, kwargs n_args = len(args) new_inputs, kwargs = _keyword2positional(kwargs) n_all_args = n_args + len(new_inputs) if n_all_args < self.n_inputs: raise ValueError( f"Missing input arguments - expected {self.n_inputs}, got {n_all_args}" ) elif n_all_args > self.n_inputs: raise ValueError( f"Too many input arguments - expected {self.n_inputs}, got {n_all_args}" ) if n_args == 0: # Create positional arguments from the keyword arguments in ``new_inputs``. new_args = [] for k in self.inputs: new_args.append(new_inputs[k]) elif n_args != self.n_inputs: # Some inputs are passed as positional, others as keyword arguments. args = list(args) # Create positional arguments from the keyword arguments in ``new_inputs``. new_args = [] for k in self.inputs: if k in new_inputs: new_args.append(new_inputs[k]) else: new_args.append(args[0]) del args[0] else: new_args = args return new_args, kwargs # *** Properties *** @property def name(self): """User-provided name for this model instance.""" return self._name @name.setter def name(self, val): """Assign a (new) name to this model.""" self._name = val @property def model_set_axis(self): """ The index of the model set axis--that is the axis of a parameter array that pertains to which model a parameter value pertains to--as specified when the model was initialized. See the documentation on :ref:`astropy:modeling-model-sets` for more details. """ return self._model_set_axis @property def param_sets(self): """ Return parameters as a pset. This is a list with one item per parameter set, which is an array of that parameter's values across all parameter sets, with the last axis associated with the parameter set. """ return self._param_sets() @property def parameters(self): """ A flattened array of all parameter values in all parameter sets. Fittable parameters maintain this list and fitters modify it. """ # Currently the sequence of a model's parameters must be contiguous # within the _parameters array (which may be a view of a larger array, # for example when taking a sub-expression of a compound model), so # the assumption here is reliable: if not self.param_names: # Trivial, but not unheard of return self._parameters self._parameters_to_array() start = self._param_metrics[self.param_names[0]]["slice"].start stop = self._param_metrics[self.param_names[-1]]["slice"].stop return self._parameters[start:stop] @parameters.setter def parameters(self, value): """ Assigning to this attribute updates the parameters array rather than replacing it. 
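
        A minimal sketch (assuming ``Gaussian1D``, whose parameters are
        ``amplitude``, ``mean``, and ``stddev``)::

            from astropy.modeling.models import Gaussian1D

            g = Gaussian1D(amplitude=1, mean=0, stddev=1)
            g.parameters = [2.0, 0.5, 3.0]  # updates the existing array in place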
""" if not self.param_names: return start = self._param_metrics[self.param_names[0]]["slice"].start stop = self._param_metrics[self.param_names[-1]]["slice"].stop try: value = np.array(value).flatten() self._parameters[start:stop] = value except ValueError as e: raise InputParameterError( "Input parameter values not compatible with the model " f"parameters array: {e!r}" ) self._array_to_parameters() @property def sync_constraints(self): """ This is a boolean property that indicates whether or not accessing constraints automatically check the constituent models current values. It defaults to True on creation of a model, but for fitting purposes it should be set to False for performance reasons. """ if not hasattr(self, "_sync_constraints"): self._sync_constraints = True return self._sync_constraints @sync_constraints.setter def sync_constraints(self, value): if not isinstance(value, bool): raise ValueError("sync_constraints only accepts True or False as values") self._sync_constraints = value @property def fixed(self): """ A ``dict`` mapping parameter names to their fixed constraint. """ if not hasattr(self, "_fixed") or self.sync_constraints: self._fixed = _ConstraintsDict(self, "fixed") return self._fixed @property def bounds(self): """ A ``dict`` mapping parameter names to their upper and lower bounds as ``(min, max)`` tuples or ``[min, max]`` lists. """ if not hasattr(self, "_bounds") or self.sync_constraints: self._bounds = _ConstraintsDict(self, "bounds") return self._bounds @property def tied(self): """ A ``dict`` mapping parameter names to their tied constraint. """ if not hasattr(self, "_tied") or self.sync_constraints: self._tied = _ConstraintsDict(self, "tied") return self._tied @property def eqcons(self): """List of parameter equality constraints.""" return self._mconstraints["eqcons"] @property def ineqcons(self): """List of parameter inequality constraints.""" return self._mconstraints["ineqcons"] def has_inverse(self): """ Returns True if the model has an analytic or user inverse defined. """ try: self.inverse except NotImplementedError: return False return True @property def inverse(self): """ Returns a new `~astropy.modeling.Model` instance which performs the inverse transform, if an analytic inverse is defined for this model. Even on models that don't have an inverse defined, this property can be set with a manually-defined inverse, such a pre-computed or experimentally determined inverse (often given as a `~astropy.modeling.polynomial.PolynomialModel`, but not by requirement). A custom inverse can be deleted with ``del model.inverse``. In this case the model's inverse is reset to its default, if a default exists (otherwise the default is to raise `NotImplementedError`). Note to authors of `~astropy.modeling.Model` subclasses: To define an inverse for a model simply override this property to return the appropriate model representing the inverse. The machinery that will make the inverse manually-overridable is added automatically by the base class. """ if self._user_inverse is not None: return self._user_inverse elif self._inverse is not None: result = self._inverse() if result is not NotImplemented: if not self._has_inverse_bounding_box: result.bounding_box = None return result raise NotImplementedError( "No analytical or user-supplied inverse transform " "has been implemented for this model." 
) @inverse.setter def inverse(self, value): if not isinstance(value, (Model, type(None))): raise ValueError( "The ``inverse`` attribute may be assigned a `Model` " "instance or `None` (where `None` explicitly forces the " "model to have no inverse." ) self._user_inverse = value @inverse.deleter def inverse(self): """ Resets the model's inverse to its default (if one exists, otherwise the model will have no inverse). """ try: del self._user_inverse except AttributeError: pass @property def has_user_inverse(self): """ A flag indicating whether or not a custom inverse model has been assigned to this model by a user, via assignment to ``model.inverse``. """ return self._user_inverse is not None @property def bounding_box(self): r""" A `tuple` of length `n_inputs` defining the bounding box limits, or raise `NotImplementedError` for no bounding_box. The default limits are given by a ``bounding_box`` property or method defined in the class body of a specific model. If not defined then this property just raises `NotImplementedError` by default (but may be assigned a custom value by a user). ``bounding_box`` can be set manually to an array-like object of shape ``(model.n_inputs, 2)``. For further usage, see :ref:`astropy:bounding-boxes` The limits are ordered according to the `numpy` ``'C'`` indexing convention, and are the reverse of the model input order, e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined: * for 1D: ``(x_low, x_high)`` * for 2D: ``((y_low, y_high), (x_low, x_high))`` * for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))`` Examples -------- Setting the ``bounding_box`` limits for a 1D and 2D model: >>> from astropy.modeling.models import Gaussian1D, Gaussian2D >>> model_1d = Gaussian1D() >>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1) >>> model_1d.bounding_box = (-5, 5) >>> model_2d.bounding_box = ((-6, 6), (-5, 5)) Setting the bounding_box limits for a user-defined 3D `custom_model`: >>> from astropy.modeling.models import custom_model >>> def const3d(x, y, z, amp=1): ... return amp ... >>> Const3D = custom_model(const3d) >>> model_3d = Const3D() >>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4)) To reset ``bounding_box`` to its default limits just delete the user-defined value--this will reset it back to the default defined on the class: >>> del model_1d.bounding_box To disable the bounding box entirely (including the default), set ``bounding_box`` to `None`: >>> model_1d.bounding_box = None >>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): NotImplementedError: No bounding box is defined for this model (note: the bounding box was explicitly disabled for this model; use `del model.bounding_box` to restore the default bounding box, if one is defined for this model). """ if self._user_bounding_box is not None: if self._user_bounding_box is NotImplemented: raise NotImplementedError( "No bounding box is defined for this model (note: the " "bounding box was explicitly disabled for this model; " "use `del model.bounding_box` to restore the default " "bounding box, if one is defined for this model)." ) return self._user_bounding_box elif self._bounding_box is None: raise NotImplementedError("No bounding box is defined for this model.") elif isinstance(self._bounding_box, ModelBoundingBox): # This typically implies a hard-coded bounding box. 
This will # probably be rare, but it is an option return self._bounding_box elif isinstance(self._bounding_box, types.MethodType): return ModelBoundingBox.validate(self, self._bounding_box()) else: # The only other allowed possibility is that it's a ModelBoundingBox # subclass, so we call it with its default arguments and return an # instance of it (that can be called to recompute the bounding box # with any optional parameters) # (In other words, in this case self._bounding_box is a *class*) bounding_box = self._bounding_box((), model=self)() return self._bounding_box(bounding_box, model=self) @bounding_box.setter def bounding_box(self, bounding_box): """ Assigns the bounding box limits. """ if bounding_box is None: cls = None # We use this to explicitly set an unimplemented bounding box (as # opposed to no user bounding box defined) bounding_box = NotImplemented elif isinstance(bounding_box, (CompoundBoundingBox, dict)): cls = CompoundBoundingBox elif isinstance(self._bounding_box, type) and issubclass( self._bounding_box, ModelBoundingBox ): cls = self._bounding_box else: cls = ModelBoundingBox if cls is not None: try: bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True) except ValueError as exc: raise ValueError(exc.args[0]) self._user_bounding_box = bounding_box def set_slice_args(self, *args): if isinstance(self._user_bounding_box, CompoundBoundingBox): self._user_bounding_box.slice_args = args else: raise RuntimeError("The bounding_box for this model is not compound") @bounding_box.deleter def bounding_box(self): self._user_bounding_box = None @property def has_user_bounding_box(self): """ A flag indicating whether or not a custom bounding_box has been assigned to this model by a user, via assignment to ``model.bounding_box``. """ return self._user_bounding_box is not None @property def cov_matrix(self): """ Fitter should set covariance matrix, if available. """ return self._cov_matrix @cov_matrix.setter def cov_matrix(self, cov): self._cov_matrix = cov unfix_untied_params = [ p for p in self.param_names if (self.fixed[p] is False) and (self.tied[p] is False) ] if type(cov) == list: # model set param_stds = [] for c in cov: param_stds.append( [np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)] ) for p, param_name in enumerate(unfix_untied_params): par = getattr(self, param_name) par.std = [item[p] for item in param_stds] setattr(self, param_name, par) else: param_stds = [ np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix) ] for param_name in unfix_untied_params: par = getattr(self, param_name) par.std = param_stds.pop(0) setattr(self, param_name, par) @property def stds(self): """ Standard deviation of parameters, if covariance matrix is available. """ return self._stds @stds.setter def stds(self, stds): self._stds = stds @property def separable(self): """A flag indicating whether a model is separable.""" if self._separable is not None: return self._separable raise NotImplementedError( 'The "separable" property is not defined for ' f"model {self.__class__.__name__}" ) # *** Public methods *** def without_units_for_data(self, **kwargs): """ Return an instance of the model for which the parameter values have been converted to the right units for the data, then the units have been stripped away. The input and output Quantity objects should be given as keyword arguments. 
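
        For a simple 1D model whose input is named ``x`` and whose output is
        named ``y``, a typical call looks like the following sketch
        (``Gaussian1D`` and the data quantities are illustrative)::

            from astropy import units as u
            from astropy.modeling.models import Gaussian1D

            g = Gaussian1D(amplitude=1 * u.Jy, mean=2 * u.um, stddev=1 * u.um)
            wavelengths = [1, 2, 3] * u.um
            fluxes = [0.5, 1.0, 0.5] * u.Jy
            unitless = g.without_units_for_data(x=wavelengths, y=fluxes)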
Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). The units that the parameters should be converted to are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter. """ model = self.copy() inputs_unit = { inp: getattr(kwargs[inp], "unit", dimensionless_unscaled) for inp in self.inputs if kwargs[inp] is not None } outputs_unit = { out: getattr(kwargs[out], "unit", dimensionless_unscaled) for out in self.outputs if kwargs[out] is not None } parameter_units = self._parameter_units_for_data_units( inputs_unit, outputs_unit ) for name, unit in parameter_units.items(): parameter = getattr(model, name) if parameter.unit is not None: parameter.value = parameter.quantity.to(unit).value parameter._set_unit(None, force=True) if isinstance(model, CompoundModel): model.strip_units_from_tree() return model def output_units(self, **kwargs): """ Return a dictionary of output units for this model given a dictionary of fitting inputs and outputs. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). This method will force extra model evaluations, which maybe computationally expensive. To avoid this, one can add a return_units property to the model, see :ref:`astropy:models_return_units`. """ units = self.return_units if units is None or units == {}: inputs = {inp: kwargs[inp] for inp in self.inputs} values = self(**inputs) if self.n_outputs == 1: values = (values,) units = { out: getattr(values[index], "unit", dimensionless_unscaled) for index, out in enumerate(self.outputs) } return units def strip_units_from_tree(self): for item in self._leaflist: for parname in item.param_names: par = getattr(item, parname) par._set_unit(None, force=True) def with_units_from_data(self, **kwargs): """ Return an instance of the model which has units for which the parameter values are compatible with the data units specified. The input and output Quantity objects should be given as keyword arguments. Notes ----- This method is needed in order to be able to fit models with units in the parameters, since we need to temporarily strip away the units from the model during the fitting (which might be done by e.g. scipy functions). The units that the parameters will gain are not necessarily the units of the input data, but are derived from them. Model subclasses that want fitting to work in the presence of quantities need to define a ``_parameter_units_for_data_units`` method that takes the input and output units (as two dictionaries) and returns a dictionary giving the target units for each parameter. 
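
        A sketch of such a hook for a simple 1D model (the parameter and
        input/output names here are illustrative)::

            def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
                return {
                    "amplitude": outputs_unit["y"],
                    "mean": inputs_unit["x"],
                    "stddev": inputs_unit["x"],
                }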
""" model = self.copy() inputs_unit = { inp: getattr(kwargs[inp], "unit", dimensionless_unscaled) for inp in self.inputs if kwargs[inp] is not None } outputs_unit = { out: getattr(kwargs[out], "unit", dimensionless_unscaled) for out in self.outputs if kwargs[out] is not None } parameter_units = self._parameter_units_for_data_units( inputs_unit, outputs_unit ) # We are adding units to parameters that already have a value, but we # don't want to convert the parameter, just add the unit directly, # hence the call to ``_set_unit``. for name, unit in parameter_units.items(): parameter = getattr(model, name) parameter._set_unit(unit, force=True) return model @property def _has_units(self): # Returns True if any of the parameters have units return any(getattr(self, param).unit is not None for param in self.param_names) @property def _supports_unit_fitting(self): # If the model has a ``_parameter_units_for_data_units`` method, this # indicates that we have enough information to strip the units away # and add them back after fitting, when fitting quantities return hasattr(self, "_parameter_units_for_data_units") @abc.abstractmethod def evaluate(self, *args, **kwargs): """Evaluate the model on some input variables.""" def sum_of_implicit_terms(self, *args, **kwargs): """ Evaluate the sum of any implicit model terms on some input variables. This includes any fixed terms used in evaluating a linear model that do not have corresponding parameters exposed to the user. The prototypical case is `astropy.modeling.functional_models.Shift`, which corresponds to a function y = a + bx, where b=1 is intrinsically fixed by the type of model, such that sum_of_implicit_terms(x) == x. This method is needed by linear fitters to correct the dependent variable for the implicit term(s) when solving for the remaining terms (ie. a = y - bx). """ def render(self, out=None, coords=None): """ Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the the `Model.bounding_box` of this model is not set. 
Examples -------- :ref:`astropy:bounding-boxes` """ try: bbox = self.bounding_box except NotImplementedError: bbox = None if isinstance(bbox, ModelBoundingBox): bbox = bbox.bounding_box() ndim = self.n_inputs if (coords is None) and (out is None) and (bbox is None): raise ValueError("If no bounding_box is set, coords or out must be input.") # for consistent indexing if ndim == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if coords is not None: coords = np.asanyarray(coords, dtype=float) # Check dimensions match out and model assert len(coords) == ndim if out is not None: if coords[0].shape != out.shape: raise ValueError("inconsistent shape of the output.") else: out = np.zeros(coords[0].shape) if out is not None: out = np.asanyarray(out) if out.ndim != ndim: raise ValueError( "the array and model must have the same number of dimensions." ) if bbox is not None: # Assures position is at center pixel, # important when using add_array. pd = ( np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]) .astype(int) .T ) pos, delta = pd if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array( [extract_array(c, sub_shape, pos) for c in coords] ) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if out is None: out = self(*sub_coords) else: try: out = add_array(out, self(*sub_coords), pos) except ValueError: raise ValueError( "The `bounding_box` is larger than the input out in " "one or more dimensions. Set " "`model.bounding_box = None`." ) else: if coords is None: im_shape = out.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] coords = coords[::-1] out += self(*coords) return out @property def input_units(self): """ This property is used to indicate what units or sets of units the evaluate method expects, and returns a dictionary mapping inputs to units (or `None` if any units are accepted). Model sub-classes can also use function annotations in evaluate to indicate valid input units, in which case this property should not be overridden since it will return the input units based on the annotations. """ if hasattr(self, "_input_units"): return self._input_units elif hasattr(self.evaluate, "__annotations__"): annotations = self.evaluate.__annotations__.copy() annotations.pop("return", None) if annotations: # If there are not annotations for all inputs this will error. return {name: annotations[name] for name in self.inputs} else: # None means any unit is accepted return None @property def return_units(self): """ This property is used to indicate what units or sets of units the output of evaluate should be in, and returns a dictionary mapping outputs to units (or `None` if any units are accepted). Model sub-classes can also use function annotations in evaluate to indicate valid output units, in which case this property should not be overridden since it will return the return units based on the annotations. """ if hasattr(self, "_return_units"): return self._return_units elif hasattr(self.evaluate, "__annotations__"): return self.evaluate.__annotations__.get("return", None) else: # None means any unit is accepted return None def _prepare_inputs_single_model(self, params, inputs, **kwargs): broadcasts = [] for idx, _input in enumerate(inputs): input_shape = _input.shape # Ensure that array scalars are always upgrade to 1-D arrays for the # sake of consistency with how parameters work. 
They will be cast back # to scalars at the end if not input_shape: inputs[idx] = _input.reshape((1,)) if not params: max_broadcast = input_shape else: max_broadcast = () for param in params: try: if self.standard_broadcasting: broadcast = check_broadcast(input_shape, param.shape) else: broadcast = input_shape except IncompatibleShapeError: raise ValueError( f"self input argument {self.inputs[idx]!r} of shape" f" {input_shape!r} cannot be broadcast with parameter" f" {param.name!r} of shape {param.shape!r}." ) if len(broadcast) > len(max_broadcast): max_broadcast = broadcast elif len(broadcast) == len(max_broadcast): max_broadcast = max(max_broadcast, broadcast) broadcasts.append(max_broadcast) if self.n_outputs > self.n_inputs: extra_outputs = self.n_outputs - self.n_inputs if not broadcasts: # If there were no inputs then the broadcasts list is empty # just add a None since there is no broadcasting of outputs and # inputs necessary (see _prepare_outputs_single_self) broadcasts.append(None) broadcasts.extend([broadcasts[0]] * extra_outputs) return inputs, (broadcasts,) @staticmethod def _remove_axes_from_shape(shape, axis): """ Given a shape tuple as the first input, construct a new one by removing that particular axis from the shape and all preceding axes. Negative axis numbers are permittted, where the axis is relative to the last axis. """ if len(shape) == 0: return shape if axis < 0: axis = len(shape) + axis return shape[:axis] + shape[axis + 1 :] if axis >= len(shape): axis = len(shape) - 1 shape = shape[axis + 1 :] return shape def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input, **kwargs): reshaped = [] pivots = [] model_set_axis_param = self.model_set_axis # needed to reshape param for idx, _input in enumerate(inputs): max_param_shape = () if self._n_models > 1 and model_set_axis_input is not False: # Use the shape of the input *excluding* the model axis input_shape = ( _input.shape[:model_set_axis_input] + _input.shape[model_set_axis_input + 1 :] ) else: input_shape = _input.shape for param in params: try: check_broadcast( input_shape, self._remove_axes_from_shape(param.shape, model_set_axis_param), ) except IncompatibleShapeError: raise ValueError( f"Model input argument {self.inputs[idx]!r} of shape" f" {input_shape!r} " f"cannot be broadcast with parameter {param.name!r} of shape " f"{self._remove_axes_from_shape(param.shape, model_set_axis_param)!r}." 
) if len(param.shape) - 1 > len(max_param_shape): max_param_shape = self._remove_axes_from_shape( param.shape, model_set_axis_param ) # We've now determined that, excluding the model_set_axis, the # input can broadcast with all the parameters input_ndim = len(input_shape) if model_set_axis_input is False: if len(max_param_shape) > input_ndim: # Just needs to prepend new axes to the input n_new_axes = 1 + len(max_param_shape) - input_ndim new_axes = (1,) * n_new_axes new_shape = new_axes + _input.shape pivot = model_set_axis_param else: pivot = input_ndim - len(max_param_shape) new_shape = _input.shape[:pivot] + (1,) + _input.shape[pivot:] new_input = _input.reshape(new_shape) else: if len(max_param_shape) >= input_ndim: n_new_axes = len(max_param_shape) - input_ndim pivot = self.model_set_axis new_axes = (1,) * n_new_axes new_shape = ( _input.shape[: pivot + 1] + new_axes + _input.shape[pivot + 1 :] ) new_input = _input.reshape(new_shape) else: pivot = _input.ndim - len(max_param_shape) - 1 new_input = np.rollaxis(_input, model_set_axis_input, pivot + 1) pivots.append(pivot) reshaped.append(new_input) if self.n_inputs < self.n_outputs: pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs)) return reshaped, (pivots,) def prepare_inputs( self, *inputs, model_set_axis=None, equivalencies=None, **kwargs ): """ This method is used in `~astropy.modeling.Model.__call__` to ensure that all the inputs to the model can be broadcast into compatible shapes (if one or both of them are input as arrays), particularly if there are more than one parameter sets. This also makes sure that (if applicable) the units of the input will be compatible with the evaluate method. """ # When we instantiate the model class, we make sure that __call__ can # take the following two keyword arguments: model_set_axis and # equivalencies. if model_set_axis is None: # By default the model_set_axis for the input is assumed to be the # same as that for the parameters the model was defined with # TODO: Ensure that negative model_set_axis arguments are respected model_set_axis = self.model_set_axis params = [getattr(self, name) for name in self.param_names] inputs = [np.asanyarray(_input, dtype=float) for _input in inputs] self._validate_input_shapes(inputs, self.inputs, model_set_axis) inputs_map = kwargs.get("inputs_map", None) inputs = self._validate_input_units(inputs, equivalencies, inputs_map) # The input formatting required for single models versus a multiple # model set are different enough that they've been split into separate # subroutines if self._n_models == 1: return self._prepare_inputs_single_model(params, inputs, **kwargs) else: return self._prepare_inputs_model_set( params, inputs, model_set_axis, **kwargs ) def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None): inputs = list(inputs) name = self.name or self.__class__.__name__ # Check that the units are correct, if applicable if self.input_units is not None: # If a leaflist is provided that means this is in the context of # a compound model and it is necessary to create the appropriate # alias for the input coordinate name for the equivalencies dict if inputs_map: edict = {} for mod, mapping in inputs_map: if self is mod: edict[mapping[0]] = equivalencies[mapping[1]] else: edict = equivalencies # We combine any instance-level input equivalencies with user # specified ones at call-time. 
input_units_equivalencies = _combine_equivalency_dict( self.inputs, edict, self.input_units_equivalencies ) # We now iterate over the different inputs and make sure that their # units are consistent with those specified in input_units. for i in range(len(inputs)): input_name = self.inputs[i] input_unit = self.input_units.get(input_name, None) if input_unit is None: continue if isinstance(inputs[i], Quantity): # We check for consistency of the units with input_units, # taking into account any equivalencies if inputs[i].unit.is_equivalent( input_unit, equivalencies=input_units_equivalencies[input_name] ): # If equivalencies have been specified, we need to # convert the input to the input units - this is # because some equivalencies are non-linear, and # we need to be sure that we evaluate the model in # its own frame of reference. If input_units_strict # is set, we also need to convert to the input units. if ( len(input_units_equivalencies) > 0 or self.input_units_strict[input_name] ): inputs[i] = inputs[i].to( input_unit, equivalencies=input_units_equivalencies[input_name], ) else: # We consider the following two cases separately so as # to be able to raise more appropriate/nicer exceptions if input_unit is dimensionless_unscaled: raise UnitsError( f"{name}: Units of input '{self.inputs[i]}', " f"{inputs[i].unit} ({inputs[i].unit.physical_type})," "could not be converted to " "required dimensionless " "input" ) else: raise UnitsError( f"{name}: Units of input '{self.inputs[i]}', " f"{inputs[i].unit} ({inputs[i].unit.physical_type})," " could not be " "converted to required input" f" units of {input_unit} ({input_unit.physical_type})" ) else: # If we allow dimensionless input, we add the units to the # input values without conversion, otherwise we raise an # exception. if ( not self.input_units_allow_dimensionless[input_name] and input_unit is not dimensionless_unscaled and input_unit is not None ): if np.any(inputs[i] != 0): raise UnitsError( f"{name}: Units of input '{self.inputs[i]}'," " (dimensionless), could not be converted to required " f"input units of {input_unit} " f"({input_unit.physical_type})" ) return inputs def _process_output_units(self, inputs, outputs): inputs_are_quantity = any(isinstance(i, Quantity) for i in inputs) if self.return_units and inputs_are_quantity: # We allow a non-iterable unit only if there is one output if self.n_outputs == 1 and not isiterable(self.return_units): return_units = {self.outputs[0]: self.return_units} else: return_units = self.return_units outputs = tuple( Quantity(out, return_units.get(out_name, None), subok=True) for out, out_name in zip(outputs, self.outputs) ) return outputs @staticmethod def _prepare_output_single_model(output, broadcast_shape): if broadcast_shape is not None: if not broadcast_shape: return output.item() else: try: return output.reshape(broadcast_shape) except ValueError: try: return output.item() except ValueError: return output return output def _prepare_outputs_single_model(self, outputs, broadcasted_shapes): outputs = list(outputs) for idx, output in enumerate(outputs): try: broadcast_shape = check_broadcast(*broadcasted_shapes[0]) except (IndexError, TypeError): broadcast_shape = broadcasted_shapes[0][idx] outputs[idx] = self._prepare_output_single_model(output, broadcast_shape) return tuple(outputs) def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis): pivots = broadcasted_shapes[0] # If model_set_axis = False was passed then use # self._model_set_axis to format the output. 
if model_set_axis is None or model_set_axis is False: model_set_axis = self.model_set_axis outputs = list(outputs) for idx, output in enumerate(outputs): pivot = pivots[idx] if pivot < output.ndim and pivot != model_set_axis: outputs[idx] = np.rollaxis(output, pivot, model_set_axis) return tuple(outputs) def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs): model_set_axis = kwargs.get("model_set_axis", None) if len(self) == 1: return self._prepare_outputs_single_model(outputs, broadcasted_shapes) else: return self._prepare_outputs_model_set( outputs, broadcasted_shapes, model_set_axis ) def copy(self): """ Return a copy of this model. Uses a deep copy so that all model attributes, including parameter values, are copied as well. """ return copy.deepcopy(self) def deepcopy(self): """ Return a deep copy of this model. """ return self.copy() @sharedmethod def rename(self, name): """ Return a copy of this model with a new name. """ new_model = self.copy() new_model._name = name return new_model def coerce_units( self, input_units=None, return_units=None, input_units_equivalencies=None, input_units_allow_dimensionless=False, ): """ Attach units to this (unitless) model. Parameters ---------- input_units : dict or tuple, optional Input units to attach. If dict, each key is the name of a model input, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.inputs`. return_units : dict or tuple, optional Output units to attach. If dict, each key is the name of a model output, and the value is the unit to attach. If tuple, the elements are units to attach in order corresponding to `Model.outputs`. input_units_equivalencies : dict, optional Default equivalencies to apply to input values. If set, this should be a dictionary where each key is a string that corresponds to one of the model inputs. input_units_allow_dimensionless : bool or dict, optional Allow dimensionless input. If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. Returns ------- `CompoundModel` A `CompoundModel` composed of the current model plus `~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units. Raises ------ ValueError If the current model already has units. Examples -------- Wrapping a unitless model to require and convert units: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP <Quantity 1.2 s> Wrapping a unitless model but still permitting unitless input: >>> from astropy.modeling.models import Polynomial1D >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(10) # doctest: +FLOAT_CMP <Quantity 21. 
s> """ from .mappings import UnitsMapping result = self if input_units is not None: if self.input_units is not None: model_units = self.input_units else: model_units = {} for unit in [model_units.get(i) for i in self.inputs]: if unit is not None and unit != dimensionless_unscaled: raise ValueError( "Cannot specify input_units for model with existing input units" ) if isinstance(input_units, dict): if input_units.keys() != set(self.inputs): message = ( f"""input_units keys ({", ".join(input_units.keys())}) """ f"""do not match model inputs ({", ".join(self.inputs)})""" ) raise ValueError(message) input_units = [input_units[i] for i in self.inputs] if len(input_units) != self.n_inputs: message = ( "input_units length does not match n_inputs: " f"expected {self.n_inputs}, received {len(input_units)}" ) raise ValueError(message) mapping = tuple( (unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units) ) input_mapping = UnitsMapping( mapping, input_units_equivalencies=input_units_equivalencies, input_units_allow_dimensionless=input_units_allow_dimensionless, ) input_mapping.inputs = self.inputs input_mapping.outputs = self.inputs result = input_mapping | result if return_units is not None: if self.return_units is not None: model_units = self.return_units else: model_units = {} for unit in [model_units.get(i) for i in self.outputs]: if unit is not None and unit != dimensionless_unscaled: raise ValueError( "Cannot specify return_units for model " "with existing output units" ) if isinstance(return_units, dict): if return_units.keys() != set(self.outputs): message = ( f"""return_units keys ({", ".join(return_units.keys())}) """ f"""do not match model outputs ({", ".join(self.outputs)})""" ) raise ValueError(message) return_units = [return_units[i] for i in self.outputs] if len(return_units) != self.n_outputs: message = ( "return_units length does not match n_outputs: " f"expected {self.n_outputs}, received {len(return_units)}" ) raise ValueError(message) mapping = tuple( (model_units.get(i), unit) for i, unit in zip(self.outputs, return_units) ) return_mapping = UnitsMapping(mapping) return_mapping.inputs = self.outputs return_mapping.outputs = self.outputs result = result | return_mapping return result @property def n_submodels(self): """ Return the number of components in a single model, which is obviously 1. """ return 1 def _initialize_constraints(self, kwargs): """ Pop parameter constraint values off the keyword arguments passed to `Model.__init__` and store them in private instance attributes. """ # Pop any constraints off the keyword arguments for constraint in self.parameter_constraints: values = kwargs.pop(constraint, {}) for ckey, cvalue in values.items(): param = getattr(self, ckey) setattr(param, constraint, cvalue) self._mconstraints = {} for constraint in self.model_constraints: values = kwargs.pop(constraint, []) self._mconstraints[constraint] = values def _initialize_parameters(self, args, kwargs): """ Initialize the _parameters array that stores raw parameter values for all parameter sets for use with vectorized fitting algorithms; on FittableModels the _param_name attributes actually just reference slices of this array. 
""" n_models = kwargs.pop("n_models", None) if not ( n_models is None or (isinstance(n_models, (int, np.integer)) and n_models >= 1) ): raise ValueError( "n_models must be either None (in which case it is " "determined from the model_set_axis of the parameter initial " "values) or it must be a positive integer " f"(got {n_models!r})" ) model_set_axis = kwargs.pop("model_set_axis", None) if model_set_axis is None: if n_models is not None and n_models > 1: # Default to zero model_set_axis = 0 else: # Otherwise disable model_set_axis = False else: if not ( model_set_axis is False or np.issubdtype(type(model_set_axis), np.integer) ): raise ValueError( "model_set_axis must be either False or an integer " "specifying the parameter array axis to map to each " f"model in a set of models (got {model_set_axis!r})." ) # Process positional arguments by matching them up with the # corresponding parameters in self.param_names--if any also appear as # keyword arguments this presents a conflict params = set() if len(args) > len(self.param_names): raise TypeError( f"{self.__class__.__name__}.__init__() takes at most " f"{len(self.param_names)} positional arguments ({len(args)} given)" ) self._model_set_axis = model_set_axis self._param_metrics = defaultdict(dict) for idx, arg in enumerate(args): if arg is None: # A value of None implies using the default value, if exists continue # We use quantity_asanyarray here instead of np.asanyarray because # if any of the arguments are quantities, we need to return a # Quantity object not a plain Numpy array. param_name = self.param_names[idx] params.add(param_name) if not isinstance(arg, Parameter): value = quantity_asanyarray(arg, dtype=float) else: value = arg self._initialize_parameter_value(param_name, value) # At this point the only remaining keyword arguments should be # parameter names; any others are in error. for param_name in self.param_names: if param_name in kwargs: if param_name in params: raise TypeError( f"{self.__class__.__name__}.__init__() got multiple values for" f" parameter {param_name!r}" ) value = kwargs.pop(param_name) if value is None: continue # We use quantity_asanyarray here instead of np.asanyarray # because if any of the arguments are quantities, we need # to return a Quantity object not a plain Numpy array. 
value = quantity_asanyarray(value, dtype=float) params.add(param_name) self._initialize_parameter_value(param_name, value) # Now deal with case where param_name is not supplied by args or kwargs for param_name in self.param_names: if param_name not in params: self._initialize_parameter_value(param_name, None) if kwargs: # If any keyword arguments were left over at this point they are # invalid--the base class should only be passed the parameter # values, constraints, and param_dim for kwarg in kwargs: # Just raise an error on the first unrecognized argument raise TypeError( f"{self.__class__.__name__}.__init__() got an unrecognized" f" parameter {kwarg!r}" ) # Determine the number of model sets: If the model_set_axis is # None then there is just one parameter set; otherwise it is determined # by the size of that axis on the first parameter--if the other # parameters don't have the right number of axes or the sizes of their # model_set_axis don't match an error is raised if model_set_axis is not False and n_models != 1 and params: max_ndim = 0 if model_set_axis < 0: min_ndim = abs(model_set_axis) else: min_ndim = model_set_axis + 1 for name in self.param_names: value = getattr(self, name) param_ndim = np.ndim(value) if param_ndim < min_ndim: raise InputParameterError( "All parameter values must be arrays of dimension at least" f" {min_ndim} for model_set_axis={model_set_axis} (the value" f" given for {name!r} is only {param_ndim}-dimensional)" ) max_ndim = max(max_ndim, param_ndim) if n_models is None: # Use the dimensions of the first parameter to determine # the number of model sets n_models = value.shape[model_set_axis] elif value.shape[model_set_axis] != n_models: raise InputParameterError( f"Inconsistent dimensions for parameter {name!r} for" f" {n_models} model sets. 
The length of axis" f" {model_set_axis} must be the same for all input parameter" " values" ) self._check_param_broadcast(max_ndim) else: if n_models is None: n_models = 1 self._check_param_broadcast(None) self._n_models = n_models # now validate parameters for name in params: param = getattr(self, name) if param._validator is not None: param._validator(self, param.value) def _initialize_parameter_value(self, param_name, value): """Mostly deals with consistency checks and determining unit issues.""" if isinstance(value, Parameter): self.__dict__[param_name] = value return param = getattr(self, param_name) # Use default if value is not provided if value is None: default = param.default if default is None: # No value was supplied for the parameter and the # parameter does not have a default, therefore the model # is underspecified raise TypeError( f"{self.__class__.__name__}.__init__() requires a value for " f"parameter {param_name!r}" ) value = default unit = param.unit else: if isinstance(value, Quantity): unit = value.unit value = value.value else: unit = None if unit is None and param.unit is not None: raise InputParameterError( f"{self.__class__.__name__}.__init__() requires a Quantity for" f" parameter {param_name!r}" ) param._unit = unit param._set_unit(unit, force=True) param.internal_unit = None if param._setter is not None: if unit is not None: _val = param._setter(value * unit) else: _val = param._setter(value) if isinstance(_val, Quantity): param.internal_unit = _val.unit param._internal_value = np.array(_val.value) else: param.internal_unit = None param._internal_value = np.array(_val) else: param._value = np.array(value) def _initialize_slices(self): param_metrics = self._param_metrics total_size = 0 for name in self.param_names: param = getattr(self, name) value = param.value param_size = np.size(value) param_shape = np.shape(value) param_slice = slice(total_size, total_size + param_size) param_metrics[name]["slice"] = param_slice param_metrics[name]["shape"] = param_shape param_metrics[name]["size"] = param_size total_size += param_size self._parameters = np.empty(total_size, dtype=np.float64) def _parameters_to_array(self): # Now set the parameter values (this will also fill # self._parameters) param_metrics = self._param_metrics for name in self.param_names: param = getattr(self, name) value = param.value if not isinstance(value, np.ndarray): value = np.array([value]) self._parameters[param_metrics[name]["slice"]] = value.ravel() # Finally validate all the parameters; we do this last so that # validators that depend on one of the other parameters' values will # work def _array_to_parameters(self): param_metrics = self._param_metrics for name in self.param_names: param = getattr(self, name) value = self._parameters[param_metrics[name]["slice"]] value.shape = param_metrics[name]["shape"] param.value = value def _check_param_broadcast(self, max_ndim): """ This subroutine checks that all parameter arrays can be broadcast against each other, and determines the shapes parameters must have in order to broadcast correctly. If model_set_axis is None this merely checks that the parameters broadcast and returns an empty dict if so. This mode is only used for single model sets. 
""" all_shapes = [] model_set_axis = self._model_set_axis for name in self.param_names: param = getattr(self, name) value = param.value param_shape = np.shape(value) param_ndim = len(param_shape) if max_ndim is not None and param_ndim < max_ndim: # All arrays have the same number of dimensions up to the # model_set_axis dimension, but after that they may have a # different number of trailing axes. The number of trailing # axes must be extended for mutual compatibility. For example # if max_ndim = 3 and model_set_axis = 0, an array with the # shape (2, 2) must be extended to (2, 1, 2). However, an # array with shape (2,) is extended to (2, 1). new_axes = (1,) * (max_ndim - param_ndim) if model_set_axis < 0: # Just need to prepend axes to make up the difference broadcast_shape = new_axes + param_shape else: broadcast_shape = ( param_shape[: model_set_axis + 1] + new_axes + param_shape[model_set_axis + 1 :] ) self._param_metrics[name]["broadcast_shape"] = broadcast_shape all_shapes.append(broadcast_shape) else: all_shapes.append(param_shape) # Now check mutual broadcastability of all shapes try: check_broadcast(*all_shapes) except IncompatibleShapeError as exc: shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args param_a = self.param_names[shape_a_idx] param_b = self.param_names[shape_b_idx] raise InputParameterError( f"Parameter {param_a!r} of shape {shape_a!r} cannot be broadcast with " f"parameter {param_b!r} of shape {shape_b!r}. All parameter arrays " "must have shapes that are mutually compatible according " "to the broadcasting rules." ) def _param_sets(self, raw=False, units=False): """ Implementation of the Model.param_sets property. This internal implementation has a ``raw`` argument which controls whether or not to return the raw parameter values (i.e. the values that are actually stored in the ._parameters array, as opposed to the values displayed to users. In most cases these are one in the same but there are currently a few exceptions. Note: This is notably an overcomplicated device and may be removed entirely in the near future. """ values = [] shapes = [] for name in self.param_names: param = getattr(self, name) if raw and param._setter: value = param._internal_value else: value = param.value broadcast_shape = self._param_metrics[name].get("broadcast_shape") if broadcast_shape is not None: value = value.reshape(broadcast_shape) shapes.append(np.shape(value)) if len(self) == 1: # Add a single param set axis to the parameter's value (thus # converting scalars to shape (1,) array values) for # consistency value = np.array([value]) if units: if raw and param.internal_unit is not None: unit = param.internal_unit else: unit = param.unit if unit is not None: value = Quantity(value, unit, subok=True) values.append(value) if len(set(shapes)) != 1 or units: # If the parameters are not all the same shape, converting to an # array is going to produce an object array # However the way Numpy creates object arrays is tricky in that it # will recurse into array objects in the list and break them up # into separate objects. Doing things this way ensures a 1-D # object array the elements of which are the individual parameter # arrays. There's not much reason to do this over returning a list # except for consistency psets = np.empty(len(values), dtype=object) psets[:] = values return psets return np.array(values) def _format_repr(self, args=[], kwargs={}, defaults={}): """ Internal implementation of ``__repr__``. 
This is separated out for ease of use by subclasses that wish to override the default ``__repr__`` while keeping the same basic formatting. """ parts = [repr(a) for a in args] parts.extend( f"{name}={param_repr_oneline(getattr(self, name))}" for name in self.param_names ) if self.name is not None: parts.append(f"name={self.name!r}") for kwarg, value in kwargs.items(): if kwarg in defaults and defaults[kwarg] == value: continue parts.append(f"{kwarg}={value!r}") if len(self) > 1: parts.append(f"n_models={len(self)}") return f"<{self.__class__.__name__}({', '.join(parts)})>" def _format_str(self, keywords=[], defaults={}): """ Internal implementation of ``__str__``. This is separated out for ease of use by subclasses that wish to override the default ``__str__`` while keeping the same basic formatting. """ default_keywords = [ ("Model", self.__class__.__name__), ("Name", self.name), ("Inputs", self.inputs), ("Outputs", self.outputs), ("Model set size", len(self)), ] parts = [ f"{keyword}: {value}" for keyword, value in default_keywords if value is not None ] for keyword, value in keywords: if keyword.lower() in defaults and defaults[keyword.lower()] == value: continue parts.append(f"{keyword}: {value}") parts.append("Parameters:") if len(self) == 1: columns = [[getattr(self, name).value] for name in self.param_names] else: columns = [getattr(self, name).value for name in self.param_names] if columns: param_table = Table(columns, names=self.param_names) # Set units on the columns for name in self.param_names: param_table[name].unit = getattr(self, name).unit parts.append(indent(str(param_table), width=4)) return "\n".join(parts) class FittableModel(Model): """ Base class for models that can be fitted using the built-in fitting algorithms. """ linear = False # derivative with respect to parameters fit_deriv = None """ Function (similar to the model's `~Model.evaluate`) to compute the derivatives of the model with respect to its parameters, for use by fitting algorithms. In other words, this computes the Jacobian matrix with respect to the model's parameters. """ # Flag that indicates if the model derivatives with respect to parameters # are given in columns or rows col_fit_deriv = True fittable = True class Fittable1DModel(FittableModel): """ Base class for one-dimensional fittable models. This class provides an easier interface to defining new models. Examples can be found in `astropy.modeling.functional_models`. """ n_inputs = 1 n_outputs = 1 _separable = True class Fittable2DModel(FittableModel): """ Base class for two-dimensional fittable models. This class provides an easier interface to defining new models. Examples can be found in `astropy.modeling.functional_models`. 
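
    A minimal sketch of a user-defined subclass (the class and parameter
    names here are illustrative)::

        from astropy.modeling import Fittable2DModel, Parameter

        class Plane2D(Fittable2DModel):
            slope_x = Parameter(default=0)
            slope_y = Parameter(default=0)
            intercept = Parameter(default=0)

            @staticmethod
            def evaluate(x, y, slope_x, slope_y, intercept):
                return slope_x * x + slope_y * y + intercept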
""" n_inputs = 2 n_outputs = 1 def _make_arithmetic_operator(oper): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g def op(f, g): return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2]) return op def _composition_operator(f, g): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g return (lambda inputs, params: g[0](f[0](inputs, params), params), f[1], g[2]) def _join_operator(f, g): # We don't bother with tuple unpacking here for efficiency's sake, but for # documentation purposes: # # f_eval, f_n_inputs, f_n_outputs = f # # and similarly for g return ( lambda inputs, params: ( f[0](inputs[: f[1]], params) + g[0](inputs[f[1] :], params) ), f[1] + g[1], f[2] + g[2], ) BINARY_OPERATORS = { "+": _make_arithmetic_operator(operator.add), "-": _make_arithmetic_operator(operator.sub), "*": _make_arithmetic_operator(operator.mul), "/": _make_arithmetic_operator(operator.truediv), "**": _make_arithmetic_operator(operator.pow), "|": _composition_operator, "&": _join_operator, } SPECIAL_OPERATORS = _SpecialOperatorsDict() def _add_special_operator(sop_name, sop): return SPECIAL_OPERATORS.add(sop_name, sop) class CompoundModel(Model): """ Base class for compound models. While it can be used directly, the recommended way to combine models is through the model operators. """ def __init__(self, op, left, right, name=None): self.__dict__["_param_names"] = None self._n_submodels = None self.op = op self.left = left self.right = right self._bounding_box = None self._user_bounding_box = None self._leaflist = None self._tdict = None self._parameters = None self._parameters_ = None self._param_metrics = None if op != "fix_inputs" and len(left) != len(right): raise ValueError("Both operands must have equal values for n_models") self._n_models = len(left) if op != "fix_inputs" and ( (left.model_set_axis != right.model_set_axis) or left.model_set_axis ): # not False and not 0 raise ValueError( "model_set_axis must be False or 0 and consistent for operands" ) self._model_set_axis = left.model_set_axis if op in ["+", "-", "*", "/", "**"] or op in SPECIAL_OPERATORS: if left.n_inputs != right.n_inputs or left.n_outputs != right.n_outputs: raise ModelDefinitionError( "Both operands must match numbers of inputs and outputs" ) self.n_inputs = left.n_inputs self.n_outputs = left.n_outputs self.inputs = left.inputs self.outputs = left.outputs elif op == "&": self.n_inputs = left.n_inputs + right.n_inputs self.n_outputs = left.n_outputs + right.n_outputs self.inputs = combine_labels(left.inputs, right.inputs) self.outputs = combine_labels(left.outputs, right.outputs) elif op == "|": if left.n_outputs != right.n_inputs: raise ModelDefinitionError( "Unsupported operands for |:" f" {left.name} (n_inputs={left.n_inputs}," f" n_outputs={left.n_outputs}) and" f" {right.name} (n_inputs={right.n_inputs}," f" n_outputs={right.n_outputs}); n_outputs for the left-hand model" " must match n_inputs for the right-hand model." ) self.n_inputs = left.n_inputs self.n_outputs = right.n_outputs self.inputs = left.inputs self.outputs = right.outputs elif op == "fix_inputs": if not isinstance(left, Model): raise ValueError( 'First argument to "fix_inputs" must be an instance of ' "an astropy Model." 
) if not isinstance(right, dict): raise ValueError( 'Expected a dictionary for second argument of "fix_inputs".' ) # Dict keys must match either possible indices # for model on left side, or names for inputs. self.n_inputs = left.n_inputs - len(right) # Assign directly to the private attribute (instead of using the setter) # to avoid asserting the new number of outputs matches the old one. self._outputs = left.outputs self.n_outputs = left.n_outputs newinputs = list(left.inputs) keys = right.keys() input_ind = [] for key in keys: if np.issubdtype(type(key), np.integer): if key >= left.n_inputs or key < 0: raise ValueError( "Substitution key integer value " "not among possible input choices." ) if key in input_ind: raise ValueError( "Duplicate specification of same input (index/name)." ) input_ind.append(key) elif isinstance(key, str): if key not in left.inputs: raise ValueError( "Substitution key string not among possible input choices." ) # Check to see it doesn't match positional # specification. ind = left.inputs.index(key) if ind in input_ind: raise ValueError( "Duplicate specification of same input (index/name)." ) input_ind.append(ind) # Remove substituted inputs input_ind.sort() input_ind.reverse() for ind in input_ind: del newinputs[ind] self.inputs = tuple(newinputs) # Now check to see if the input model has bounding_box defined. # If so, remove the appropriate dimensions and set it for this # instance. try: self.bounding_box = self.left.bounding_box.fix_inputs(self, right) except NotImplementedError: pass else: raise ModelDefinitionError("Illegal operator: ", self.op) self.name = name self._fittable = None self.fit_deriv = None self.col_fit_deriv = None if op in ("|", "+", "-"): self.linear = left.linear and right.linear else: self.linear = False self.eqcons = [] self.ineqcons = [] self.n_left_params = len(self.left.parameters) self._map_parameters() def _get_left_inputs_from_args(self, args): return args[: self.left.n_inputs] def _get_right_inputs_from_args(self, args): op = self.op if op == "&": # Args expected to look like (*left inputs, *right inputs, *left params, *right params) return args[self.left.n_inputs : self.left.n_inputs + self.right.n_inputs] elif op == "|" or op == "fix_inputs": return None else: return args[: self.left.n_inputs] def _get_left_params_from_args(self, args): op = self.op if op == "&": # Args expected to look like (*left inputs, *right inputs, *left params, *right params) n_inputs = self.left.n_inputs + self.right.n_inputs return args[n_inputs : n_inputs + self.n_left_params] else: return args[self.left.n_inputs : self.left.n_inputs + self.n_left_params] def _get_right_params_from_args(self, args): op = self.op if op == "fix_inputs": return None if op == "&": # Args expected to look like (*left inputs, *right inputs, *left params, *right params) return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params :] else: return args[self.left.n_inputs + self.n_left_params :] def _get_kwarg_model_parameters_as_positional(self, args, kwargs): # could do it with inserts but rebuilding seems like simpilist way # TODO: Check if any param names are in kwargs maybe as an intersection of sets? 
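        # Layout assumed for ``args`` (mirrors the _get_*_from_args helpers
        # above):
        #   '&'       -> (*left inputs, *right inputs, *left params, *right params)
        #   otherwise -> (*left inputs, *left params, *right params)
        # Parameters supplied as keywords are popped from ``kwargs`` and spliced
        # back into this positional layout below.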
if self.op == "&": new_args = list(args[: self.left.n_inputs + self.right.n_inputs]) args_pos = self.left.n_inputs + self.right.n_inputs else: new_args = list(args[: self.left.n_inputs]) args_pos = self.left.n_inputs for param_name in self.param_names: kw_value = kwargs.pop(param_name, None) if kw_value is not None: value = kw_value else: try: value = args[args_pos] except IndexError: raise IndexError("Missing parameter or input") args_pos += 1 new_args.append(value) return new_args, kwargs def _apply_operators_to_value_lists(self, leftval, rightval, **kw): op = self.op if op == "+": return binary_operation(operator.add, leftval, rightval) elif op == "-": return binary_operation(operator.sub, leftval, rightval) elif op == "*": return binary_operation(operator.mul, leftval, rightval) elif op == "/": return binary_operation(operator.truediv, leftval, rightval) elif op == "**": return binary_operation(operator.pow, leftval, rightval) elif op == "&": if not isinstance(leftval, tuple): leftval = (leftval,) if not isinstance(rightval, tuple): rightval = (rightval,) return leftval + rightval elif op in SPECIAL_OPERATORS: return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval) else: raise ModelDefinitionError("Unrecognized operator {op}") def evaluate(self, *args, **kw): op = self.op args, kw = self._get_kwarg_model_parameters_as_positional(args, kw) left_inputs = self._get_left_inputs_from_args(args) left_params = self._get_left_params_from_args(args) if op == "fix_inputs": pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs))) fixed_inputs = { key if np.issubdtype(type(key), np.integer) else pos_index[key]: value for key, value in self.right.items() } left_inputs = [ fixed_inputs[ind] if ind in fixed_inputs.keys() else inp for ind, inp in enumerate(left_inputs) ] leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params)) if op == "fix_inputs": return leftval right_inputs = self._get_right_inputs_from_args(args) right_params = self._get_right_params_from_args(args) if op == "|": if isinstance(leftval, tuple): return self.right.evaluate(*itertools.chain(leftval, right_params)) else: return self.right.evaluate(leftval, *right_params) else: rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params)) return self._apply_operators_to_value_lists(leftval, rightval, **kw) @property def n_submodels(self): if self._leaflist is None: self._make_leaflist() return len(self._leaflist) @property def submodel_names(self): """Return the names of submodels in a ``CompoundModel``.""" if self._leaflist is None: self._make_leaflist() names = [item.name for item in self._leaflist] nonecount = 0 newnames = [] for item in names: if item is None: newnames.append(f"None_{nonecount}") nonecount += 1 else: newnames.append(item) return tuple(newnames) def both_inverses_exist(self): """ if both members of this compound model have inverses return True. """ import warnings from astropy.utils.exceptions import AstropyDeprecationWarning warnings.warn( "CompoundModel.both_inverses_exist is deprecated. Use has_inverse instead.", AstropyDeprecationWarning, ) try: self.left.inverse self.right.inverse except NotImplementedError: return False return True def _pre_evaluate(self, *args, **kwargs): """ CompoundModel specific input setup that needs to occur prior to model evaluation. Note ---- All of the _pre_evaluate for each component model will be performed at the time that the individual model is evaluated. 
""" # If equivalencies are provided, necessary to map parameters and pass # the leaflist as a keyword input for use by model evaluation so that # the compound model input names can be matched to the model input # names. if "equivalencies" in kwargs: # Restructure to be useful for the individual model lookup kwargs["inputs_map"] = [ (value[0], (value[1], key)) for key, value in self.inputs_map().items() ] # Setup actual model evaluation method def evaluate(_inputs): return self._evaluate(*_inputs, **kwargs) return evaluate, args, None, kwargs @property def _argnames(self): """ No inputs should be used to determine input_shape when handling compound models. """ return () def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs): """ CompoundModel specific post evaluation processing of outputs. Note ---- All of the _post_evaluate for each component model will be performed at the time that the individual model is evaluated. """ if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1: return outputs[0] return outputs def _evaluate(self, *args, **kw): op = self.op if op != "fix_inputs": if op != "&": leftval = self.left(*args, **kw) if op != "|": rightval = self.right(*args, **kw) else: rightval = None else: leftval = self.left(*(args[: self.left.n_inputs]), **kw) rightval = self.right(*(args[self.left.n_inputs :]), **kw) if op != "|": return self._apply_operators_to_value_lists(leftval, rightval, **kw) elif op == "|": if isinstance(leftval, tuple): return self.right(*leftval, **kw) else: return self.right(leftval, **kw) else: subs = self.right newargs = list(args) subinds = [] subvals = [] for key in subs.keys(): if np.issubdtype(type(key), np.integer): subinds.append(key) elif isinstance(key, str): ind = self.left.inputs.index(key) subinds.append(ind) subvals.append(subs[key]) # Turn inputs specified in kw into positional indices. # Names for compound inputs do not propagate to sub models. kwind = [] kwval = [] for kwkey in list(kw.keys()): if kwkey in self.inputs: ind = self.inputs.index(kwkey) if ind < len(args): raise ValueError( "Keyword argument duplicates positional value supplied." ) kwind.append(ind) kwval.append(kw[kwkey]) del kw[kwkey] # Build new argument list # Append keyword specified args first if kwind: kwargs = list(zip(kwind, kwval)) kwargs.sort() kwindsorted, kwvalsorted = list(zip(*kwargs)) newargs = newargs + list(kwvalsorted) if subinds: subargs = list(zip(subinds, subvals)) subargs.sort() # subindsorted, subvalsorted = list(zip(*subargs)) # The substitutions must be inserted in order for ind, val in subargs: newargs.insert(ind, val) return self.left(*newargs, **kw) @property def param_names(self): """An ordered list of parameter names.""" return self._param_names def _make_leaflist(self): tdict = {} leaflist = [] make_subtree_dict(self, "", tdict, leaflist) self._leaflist = leaflist self._tdict = tdict def __getattr__(self, name): """ If someone accesses an attribute not already defined, map the parameters, and then see if the requested attribute is one of the parameters. """ # The following test is needed to avoid infinite recursion # caused by deepcopy. There may be other such cases discovered. 
if name == "__setstate__": raise AttributeError if name in self._param_names: return self.__dict__[name] else: raise AttributeError(f'Attribute "{name}" not found') def __getitem__(self, index): if self._leaflist is None: self._make_leaflist() leaflist = self._leaflist tdict = self._tdict if isinstance(index, slice): if index.step: raise ValueError("Steps in slices not supported for compound models") if index.start is not None: if isinstance(index.start, str): start = self._str_index_to_int(index.start) else: start = index.start else: start = 0 if index.stop is not None: if isinstance(index.stop, str): stop = self._str_index_to_int(index.stop) else: stop = index.stop - 1 else: stop = len(leaflist) - 1 if index.stop == 0: raise ValueError("Slice endpoint cannot be 0") if start < 0: start = len(leaflist) + start if stop < 0: stop = len(leaflist) + stop # now search for matching node: if stop == start: # only single value, get leaf instead in code below index = start else: for key in tdict: node, leftind, rightind = tdict[key] if leftind == start and rightind == stop: return node raise IndexError("No appropriate subtree matches slice") if np.issubdtype(type(index), np.integer): return leaflist[index] elif isinstance(index, str): return leaflist[self._str_index_to_int(index)] else: raise TypeError("index must be integer, slice, or model name string") def _str_index_to_int(self, str_index): # Search through leaflist for item with that name found = [] for nleaf, leaf in enumerate(self._leaflist): if getattr(leaf, "name", None) == str_index: found.append(nleaf) if len(found) == 0: raise IndexError(f"No component with name '{str_index}' found") if len(found) > 1: raise IndexError( f"Multiple components found using '{str_index}' as name\n" f"at indices {found}" ) return found[0] @property def n_inputs(self): """The number of inputs of a model.""" return self._n_inputs @n_inputs.setter def n_inputs(self, value): self._n_inputs = value @property def n_outputs(self): """The number of outputs of a model.""" return self._n_outputs @n_outputs.setter def n_outputs(self, value): self._n_outputs = value @property def eqcons(self): return self._eqcons @eqcons.setter def eqcons(self, value): self._eqcons = value @property def ineqcons(self): return self._eqcons @ineqcons.setter def ineqcons(self, value): self._eqcons = value def traverse_postorder(self, include_operator=False): """Postorder traversal of the CompoundModel tree.""" res = [] if isinstance(self.left, CompoundModel): res = res + self.left.traverse_postorder(include_operator) else: res = res + [self.left] if isinstance(self.right, CompoundModel): res = res + self.right.traverse_postorder(include_operator) else: res = res + [self.right] if include_operator: res.append(self.op) else: res.append(self) return res def _format_expression(self, format_leaf=None): leaf_idx = 0 operands = deque() if format_leaf is None: format_leaf = lambda i, l: f"[{i}]" for node in self.traverse_postorder(): if not isinstance(node, CompoundModel): operands.append(format_leaf(leaf_idx, node)) leaf_idx += 1 continue right = operands.pop() left = operands.pop() if node.op in OPERATOR_PRECEDENCE: oper_order = OPERATOR_PRECEDENCE[node.op] if isinstance(node, CompoundModel): if ( isinstance(node.left, CompoundModel) and OPERATOR_PRECEDENCE[node.left.op] < oper_order ): left = f"({left})" if ( isinstance(node.right, CompoundModel) and OPERATOR_PRECEDENCE[node.right.op] < oper_order ): right = f"({right})" operands.append(" ".join((left, node.op, right))) else: left = 
f"(({left})," right = f"({right}))" operands.append(" ".join((node.op[0], left, right))) return "".join(operands) def _format_components(self): if self._parameters_ is None: self._map_parameters() return "\n\n".join(f"[{idx}]: {m!r}" for idx, m in enumerate(self._leaflist)) def __str__(self): expression = self._format_expression() components = self._format_components() keywords = [ ("Expression", expression), ("Components", "\n" + indent(components)), ] return super()._format_str(keywords=keywords) def rename(self, name): self.name = name return self @property def isleaf(self): return False @property def inverse(self): if self.op == "|": return self.right.inverse | self.left.inverse elif self.op == "&": return self.left.inverse & self.right.inverse else: return NotImplemented @property def fittable(self): """Set the fittable attribute on a compound model.""" if self._fittable is None: if self._leaflist is None: self._map_parameters() self._fittable = all(m.fittable for m in self._leaflist) return self._fittable __add__ = _model_oper("+") __sub__ = _model_oper("-") __mul__ = _model_oper("*") __truediv__ = _model_oper("/") __pow__ = _model_oper("**") __or__ = _model_oper("|") __and__ = _model_oper("&") def _map_parameters(self): """ Map all the constituent model parameters to the compound object, renaming as necessary by appending a suffix number. This can be an expensive operation, particularly for a complex expression tree. All the corresponding parameter attributes are created that one expects for the Model class. The parameter objects that the attributes point to are the same objects as in the constiutent models. Changes made to parameter values to either are seen by both. Prior to calling this, none of the associated attributes will exist. This method must be called to make the model usable by fitting engines. If oldnames=True, then parameters are named as in the original implementation of compound models. """ if self._parameters is not None: # do nothing return if self._leaflist is None: self._make_leaflist() self._parameters_ = {} param_map = {} self._param_names = [] for lindex, leaf in enumerate(self._leaflist): if not isinstance(leaf, dict): for param_name in leaf.param_names: param = getattr(leaf, param_name) new_param_name = f"{param_name}_{lindex}" self.__dict__[new_param_name] = param self._parameters_[new_param_name] = param self._param_names.append(new_param_name) param_map[new_param_name] = (lindex, param_name) self._param_metrics = {} self._param_map = param_map self._param_map_inverse = {v: k for k, v in param_map.items()} self._initialize_slices() self._param_names = tuple(self._param_names) def _initialize_slices(self): param_metrics = self._param_metrics total_size = 0 for name in self.param_names: param = getattr(self, name) value = param.value param_size = np.size(value) param_shape = np.shape(value) param_slice = slice(total_size, total_size + param_size) param_metrics[name] = {} param_metrics[name]["slice"] = param_slice param_metrics[name]["shape"] = param_shape param_metrics[name]["size"] = param_size total_size += param_size self._parameters = np.empty(total_size, dtype=np.float64) @staticmethod def _recursive_lookup(branch, adict, key): if isinstance(branch, CompoundModel): return adict[key] return branch, key def inputs_map(self): """ Map the names of the inputs to this ExpressionTree to the inputs to the leaf models. 
""" inputs_map = {} if not isinstance( self.op, str ): # If we don't have an operator the mapping is trivial return {inp: (self, inp) for inp in self.inputs} elif self.op == "|": if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() for inp in self.inputs: if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[inp] else: inputs_map[inp] = self.left, inp elif self.op == "&": if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() if isinstance(self.right, CompoundModel): r_inputs_map = self.right.inputs_map() for i, inp in enumerate(self.inputs): if i < len(self.left.inputs): # Get from left if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[self.left.inputs[i]] else: inputs_map[inp] = self.left, self.left.inputs[i] else: # Get from right if isinstance(self.right, CompoundModel): inputs_map[inp] = r_inputs_map[ self.right.inputs[i - len(self.left.inputs)] ] else: inputs_map[inp] = ( self.right, self.right.inputs[i - len(self.left.inputs)], ) elif self.op == "fix_inputs": fixed_ind = list(self.right.keys()) ind = [ list(self.left.inputs).index(i) if isinstance(i, str) else i for i in fixed_ind ] inp_ind = list(range(self.left.n_inputs)) for i in ind: inp_ind.remove(i) for i in inp_ind: inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i] else: if isinstance(self.left, CompoundModel): l_inputs_map = self.left.inputs_map() for inp in self.left.inputs: if isinstance(self.left, CompoundModel): inputs_map[inp] = l_inputs_map[inp] else: inputs_map[inp] = self.left, inp return inputs_map def _parameter_units_for_data_units(self, input_units, output_units): if self._leaflist is None: self._map_parameters() units_for_data = {} for imodel, model in enumerate(self._leaflist): units_for_data_leaf = model._parameter_units_for_data_units( input_units, output_units ) for param_leaf in units_for_data_leaf: param = self._param_map_inverse[(imodel, param_leaf)] units_for_data[param] = units_for_data_leaf[param_leaf] return units_for_data @property def input_units(self): inputs_map = self.inputs_map() input_units_dict = { key: inputs_map[key][0].input_units[orig_key] for key, (mod, orig_key) in inputs_map.items() if inputs_map[key][0].input_units is not None } if input_units_dict: return input_units_dict return None @property def input_units_equivalencies(self): inputs_map = self.inputs_map() input_units_equivalencies_dict = { key: inputs_map[key][0].input_units_equivalencies[orig_key] for key, (mod, orig_key) in inputs_map.items() if inputs_map[key][0].input_units_equivalencies is not None } if not input_units_equivalencies_dict: return None return input_units_equivalencies_dict @property def input_units_allow_dimensionless(self): inputs_map = self.inputs_map() return { key: inputs_map[key][0].input_units_allow_dimensionless[orig_key] for key, (mod, orig_key) in inputs_map.items() } @property def input_units_strict(self): inputs_map = self.inputs_map() return { key: inputs_map[key][0].input_units_strict[orig_key] for key, (mod, orig_key) in inputs_map.items() } @property def return_units(self): outputs_map = self.outputs_map() return { key: outputs_map[key][0].return_units[orig_key] for key, (mod, orig_key) in outputs_map.items() if outputs_map[key][0].return_units is not None } def outputs_map(self): """ Map the names of the outputs to this ExpressionTree to the outputs to the leaf models. 
""" outputs_map = {} if not isinstance( self.op, str ): # If we don't have an operator the mapping is trivial return {out: (self, out) for out in self.outputs} elif self.op == "|": if isinstance(self.right, CompoundModel): r_outputs_map = self.right.outputs_map() for out in self.outputs: if isinstance(self.right, CompoundModel): outputs_map[out] = r_outputs_map[out] else: outputs_map[out] = self.right, out elif self.op == "&": if isinstance(self.left, CompoundModel): l_outputs_map = self.left.outputs_map() if isinstance(self.right, CompoundModel): r_outputs_map = self.right.outputs_map() for i, out in enumerate(self.outputs): if i < len(self.left.outputs): # Get from left if isinstance(self.left, CompoundModel): outputs_map[out] = l_outputs_map[self.left.outputs[i]] else: outputs_map[out] = self.left, self.left.outputs[i] else: # Get from right if isinstance(self.right, CompoundModel): outputs_map[out] = r_outputs_map[ self.right.outputs[i - len(self.left.outputs)] ] else: outputs_map[out] = ( self.right, self.right.outputs[i - len(self.left.outputs)], ) elif self.op == "fix_inputs": return self.left.outputs_map() else: if isinstance(self.left, CompoundModel): l_outputs_map = self.left.outputs_map() for out in self.left.outputs: if isinstance(self.left, CompoundModel): outputs_map[out] = l_outputs_map()[out] else: outputs_map[out] = self.left, out return outputs_map @property def has_user_bounding_box(self): """ A flag indicating whether or not a custom bounding_box has been assigned to this model by a user, via assignment to ``model.bounding_box``. """ return self._user_bounding_box is not None def render(self, out=None, coords=None): """ Evaluate a model at fixed positions, respecting the ``bounding_box``. The key difference relative to evaluating the model directly is that this method is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- out : `numpy.ndarray`, optional An array that the evaluated model will be added to. If this is not given (or given as ``None``), a new array will be created. coords : array-like, optional An array to be used to translate from the model's input coordinates to the ``out`` array. It should have the property that ``self(coords)`` yields the same shape as ``out``. If ``out`` is not specified, ``coords`` will be used to determine the shape of the returned array. If this is not provided (or None), the model will be evaluated on a grid determined by `Model.bounding_box`. Returns ------- out : `numpy.ndarray` The model added to ``out`` if ``out`` is not ``None``, or else a new array from evaluating the model over ``coords``. If ``out`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Raises ------ ValueError If ``coords`` are not given and the the `Model.bounding_box` of this model is not set. 
Examples -------- :ref:`astropy:bounding-boxes` """ bbox = self.get_bounding_box() ndim = self.n_inputs if (coords is None) and (out is None) and (bbox is None): raise ValueError("If no bounding_box is set, coords or out must be input.") # for consistent indexing if ndim == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if coords is not None: coords = np.asanyarray(coords, dtype=float) # Check dimensions match out and model assert len(coords) == ndim if out is not None: if coords[0].shape != out.shape: raise ValueError("inconsistent shape of the output.") else: out = np.zeros(coords[0].shape) if out is not None: out = np.asanyarray(out) if out.ndim != ndim: raise ValueError( "the array and model must have the same number of dimensions." ) if bbox is not None: # Assures position is at center pixel, important when using # add_array. pd = ( np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]) .astype(int) .T ) pos, delta = pd if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array( [extract_array(c, sub_shape, pos) for c in coords] ) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if out is None: out = self(*sub_coords) else: try: out = add_array(out, self(*sub_coords), pos) except ValueError: raise ValueError( "The `bounding_box` is larger than the input out in " "one or more dimensions. Set " "`model.bounding_box = None`." ) else: if coords is None: im_shape = out.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] coords = coords[::-1] out += self(*coords) return out def replace_submodel(self, name, model): """ Construct a new `~astropy.modeling.CompoundModel` instance from an existing CompoundModel, replacing the named submodel with a new model. In order to ensure that inverses and names are kept/reconstructed, it's necessary to rebuild the CompoundModel from the replaced node all the way back to the base. The original CompoundModel is left untouched. Parameters ---------- name : str name of submodel to be replaced model : `~astropy.modeling.Model` replacement model """ submodels = [ m for m in self.traverse_postorder() if getattr(m, "name", None) == name ] if submodels: if len(submodels) > 1: raise ValueError(f"More than one submodel named {name}") old_model = submodels.pop() if len(old_model) != len(model): raise ValueError( "New and old models must have equal values for n_models" ) # Do this check first in order to raise a more helpful Exception, # although it would fail trying to construct the new CompoundModel if ( old_model.n_inputs != model.n_inputs or old_model.n_outputs != model.n_outputs ): raise ValueError( "New model must match numbers of inputs and " "outputs of existing model" ) tree = _get_submodel_path(self, name) while tree: branch = self.copy() for node in tree[:-1]: branch = getattr(branch, node) setattr(branch, tree[-1], model) model = CompoundModel( branch.op, branch.left, branch.right, name=branch.name ) tree = tree[:-1] return model else: raise ValueError(f"No submodels found named {name}") def _set_sub_models_and_parameter_units(self, left, right): """ Provides a work-around to properly set the sub models and respective parameters's units/values when using ``without_units_for_data`` or ``without_units_for_data`` methods. 
""" model = CompoundModel(self.op, left, right) self.left = left self.right = right for name in model.param_names: model_parameter = getattr(model, name) parameter = getattr(self, name) parameter.value = model_parameter.value parameter._set_unit(model_parameter.unit, force=True) def without_units_for_data(self, **kwargs): """ See `~astropy.modeling.Model.without_units_for_data` for overview of this method. Notes ----- This modifies the behavior of the base method to account for the case where the sub-models of a compound model have different output units. This is only valid for compound * and / compound models as in that case it is reasonable to mix the output units. It does this by modifying the output units of each sub model by using the output units of the other sub model so that we can apply the original function and get the desired result. Additional data has to be output in the mixed output unit case so that the units can be properly rebuilt by `~astropy.modeling.CompoundModel.with_units_from_data`. Outside the mixed output units, this method is identical to the base method. """ if self.op in ["*", "/"]: model = self.copy() inputs = {inp: kwargs[inp] for inp in self.inputs} left_units = self.left.output_units(**kwargs) right_units = self.right.output_units(**kwargs) if self.op == "*": left_kwargs = { out: kwargs[out] / right_units[out] for out in self.left.outputs if kwargs[out] is not None } right_kwargs = { out: kwargs[out] / left_units[out] for out in self.right.outputs if kwargs[out] is not None } else: left_kwargs = { out: kwargs[out] * right_units[out] for out in self.left.outputs if kwargs[out] is not None } right_kwargs = { out: 1 / kwargs[out] * left_units[out] for out in self.right.outputs if kwargs[out] is not None } left_kwargs.update(inputs.copy()) right_kwargs.update(inputs.copy()) left = self.left.without_units_for_data(**left_kwargs) if isinstance(left, tuple): left_kwargs["_left_kwargs"] = left[1] left_kwargs["_right_kwargs"] = left[2] left = left[0] right = self.right.without_units_for_data(**right_kwargs) if isinstance(right, tuple): right_kwargs["_left_kwargs"] = right[1] right_kwargs["_right_kwargs"] = right[2] right = right[0] model._set_sub_models_and_parameter_units(left, right) return model, left_kwargs, right_kwargs else: return super().without_units_for_data(**kwargs) def with_units_from_data(self, **kwargs): """ See `~astropy.modeling.Model.with_units_from_data` for overview of this method. Notes ----- This modifies the behavior of the base method to account for the case where the sub-models of a compound model have different output units. This is only valid for compound * and / compound models as in that case it is reasonable to mix the output units. In order to do this it requires some additional information output by `~astropy.modeling.CompoundModel.without_units_for_data` passed as keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``. Outside the mixed output units, this method is identical to the base method. """ if self.op in ["*", "/"]: left_kwargs = kwargs.pop("_left_kwargs") right_kwargs = kwargs.pop("_right_kwargs") left = self.left.with_units_from_data(**left_kwargs) right = self.right.with_units_from_data(**right_kwargs) model = self.copy() model._set_sub_models_and_parameter_units(left, right) return model else: return super().with_units_from_data(**kwargs) def _get_submodel_path(model, name): """Find the route down a CompoundModel's tree to the model with the specified name (whether it's a leaf or not). 
""" if getattr(model, "name", None) == name: return [] try: return ["left"] + _get_submodel_path(model.left, name) except (AttributeError, TypeError): pass try: return ["right"] + _get_submodel_path(model.right, name) except (AttributeError, TypeError): pass def binary_operation(binoperator, left, right): """ Perform binary operation. Operands may be matching tuples of operands. """ if isinstance(left, tuple) and isinstance(right, tuple): return tuple(binoperator(item[0], item[1]) for item in zip(left, right)) return binoperator(left, right) def get_ops(tree, opset): """ Recursive function to collect operators used. """ if isinstance(tree, CompoundModel): opset.add(tree.op) get_ops(tree.left, opset) get_ops(tree.right, opset) else: return def make_subtree_dict(tree, nodepath, tdict, leaflist): """Traverse a tree noting each node by a key. The key indicates all the left/right choices necessary to reach that node. Each key will reference a tuple that contains: - reference to the compound model for that node. - left most index contained within that subtree (relative to all indices for the whole tree) - right most index contained within that subtree """ # if this is a leaf, just append it to the leaflist if not hasattr(tree, "isleaf"): leaflist.append(tree) else: leftmostind = len(leaflist) make_subtree_dict(tree.left, nodepath + "l", tdict, leaflist) make_subtree_dict(tree.right, nodepath + "r", tdict, leaflist) rightmostind = len(leaflist) - 1 tdict[nodepath] = (tree, leftmostind, rightmostind) _ORDER_OF_OPERATORS = [("fix_inputs",), ("|",), ("&",), ("+", "-"), ("*", "/"), ("**",)] OPERATOR_PRECEDENCE = {} for idx, ops in enumerate(_ORDER_OF_OPERATORS): for op in ops: OPERATOR_PRECEDENCE[op] = idx del idx, op, ops def fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None): """ This function creates a compound model with one or more of the input values of the input model assigned fixed values (scalar or array). Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that one or more of the model input values will be fixed to some constant value. values : dict A dictionary where the key identifies which input to fix and its value is the value to fix it at. The key may either be the name of the input or a number reflecting its order in the inputs. Examples -------- >>> from astropy.modeling.models import Gaussian2D >>> g = Gaussian2D(1, 2, 3, 4, 5) >>> gv = fix_inputs(g, {0: 2.5}) Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y) """ model = CompoundModel("fix_inputs", modelinstance, values) if bounding_boxes is not None: if selector_args is None: selector_args = tuple((key, True) for key in values.keys()) bbox = CompoundBoundingBox.validate( modelinstance, bounding_boxes, selector_args ) _selector = bbox.selector_args.get_fixed_values(modelinstance, values) new_bbox = bbox[_selector] new_bbox = new_bbox.__class__.validate(model, new_bbox) model.bounding_box = new_bbox return model def bind_bounding_box(modelinstance, bounding_box, ignored=None, order="C"): """ Set a validated bounding box to a model instance. Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that the validated bounding box will be set on. bounding_box : tuple A bounding box tuple, see :ref:`astropy:bounding-boxes` for details ignored : list List of the inputs to be ignored by the bounding box. order : str, optional The ordering of the bounding box tuple, can be either ``'C'`` or ``'F'``. 
""" modelinstance.bounding_box = ModelBoundingBox.validate( modelinstance, bounding_box, ignored=ignored, order=order ) def bind_compound_bounding_box( modelinstance, bounding_boxes, selector_args, create_selector=None, ignored=None, order="C", ): """ Add a validated compound bounding box to a model instance. Parameters ---------- modelinstance : `~astropy.modeling.Model` instance This is the model that the validated compound bounding box will be set on. bounding_boxes : dict A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes` for details. selector_args : list List of selector argument tuples to define selection for compound bounding box, see :ref:`astropy:bounding-boxes` for details. create_selector : callable, optional An optional callable with interface (selector_value, model) which can generate a bounding box based on a selector value and model if there is no bounding box in the compound bounding box listed under that selector value. Default is ``None``, meaning new bounding box entries will not be automatically generated. ignored : list List of the inputs to be ignored by the bounding box. order : str, optional The ordering of the bounding box tuple, can be either ``'C'`` or ``'F'``. """ modelinstance.bounding_box = CompoundBoundingBox.validate( modelinstance, bounding_boxes, selector_args, create_selector=create_selector, ignored=ignored, order=order, ) def custom_model(*args, fit_deriv=None): """ Create a model from a user defined function. The inputs and parameters of the model will be inferred from the arguments of the function. This can be used either as a function or as a decorator. See below for examples of both usages. The model is separable only if there is a single input. .. note:: All model parameters have to be defined as keyword arguments with default values in the model function. Use `None` as a default argument value if you do not want to have a default value for that parameter. The standard settable model properties can be configured by default using keyword arguments matching the name of the property; however, these values are not set as model "parameters". Moreover, users cannot use keyword arguments matching non-settable model properties, with the exception of ``n_outputs`` which should be set to the number of outputs of your function. Parameters ---------- func : function Function which defines the model. It should take N positional arguments where ``N`` is dimensions of the model (the number of independent variable in the model), and any number of keyword arguments (the parameters). It must return the value of the model (typically as an array, but can also be a scalar for scalar inputs). This corresponds to the `~astropy.modeling.Model.evaluate` method. fit_deriv : function, optional Function which defines the Jacobian derivative of the model. I.e., the derivative with respect to the *parameters* of the model. It should have the same argument signature as ``func``, but should return a sequence where each element of the sequence is the derivative with respect to the corresponding argument. This corresponds to the :meth:`~astropy.modeling.FittableModel.fit_deriv` method. Examples -------- Define a sinusoidal model function as a custom 1D model:: >>> from astropy.modeling.models import custom_model >>> import numpy as np >>> def sine_model(x, amplitude=1., frequency=1.): ... return amplitude * np.sin(2 * np.pi * frequency * x) >>> def sine_deriv(x, amplitude=1., frequency=1.): ... 
return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x) >>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv) Create an instance of the custom model and evaluate it:: >>> model = SineModel() >>> model(0.25) 1.0 This model instance can now be used like a usual astropy model. The next example demonstrates a 2D Moffat function model, and also demonstrates the support for docstrings (this example could also include a derivative, but it has been omitted for simplicity):: >>> @custom_model ... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0, ... alpha=1.0): ... \"\"\"Two dimensional Moffat function.\"\"\" ... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2 ... return amplitude * (1 + rr_gg) ** (-alpha) ... >>> print(Moffat2D.__doc__) Two dimensional Moffat function. >>> model = Moffat2D() >>> model(1, 1) # doctest: +FLOAT_CMP 0.3333333333333333 """ if len(args) == 1 and callable(args[0]): return _custom_model_wrapper(args[0], fit_deriv=fit_deriv) elif not args: return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv) else: raise TypeError( f"{__name__} takes at most one positional argument (the callable/" "function to be turned into a model. When used as a decorator " "it should be passed keyword arguments only (if " "any)." ) def _custom_model_inputs(func): """ Processes the inputs to the `custom_model`'s function into the appropriate categories. Parameters ---------- func : callable Returns ------- inputs : list list of evaluation inputs special_params : dict dictionary of model properties which require special treatment settable_params : dict dictionary of defaults for settable model properties params : dict dictionary of model parameters set by `custom_model`'s function """ inputs, parameters = get_inputs_and_params(func) special = ["n_outputs"] settable = [ attr for attr, value in vars(Model).items() if isinstance(value, property) and value.fset is not None ] properties = [ attr for attr, value in vars(Model).items() if isinstance(value, property) and value.fset is None and attr not in special ] special_params = {} settable_params = {} params = {} for param in parameters: if param.name in special: special_params[param.name] = param.default elif param.name in settable: settable_params[param.name] = param.default elif param.name in properties: raise ValueError( f"Parameter '{param.name}' cannot be a model property: {properties}." ) else: params[param.name] = param.default return inputs, special_params, settable_params, params def _custom_model_wrapper(func, fit_deriv=None): """ Internal implementation `custom_model`. When `custom_model` is called as a function its arguments are passed to this function, and the result of this function is returned. When `custom_model` is used as a decorator a partial evaluation of this function is returned by `custom_model`. """ if not callable(func): raise ModelDefinitionError( "func is not callable; it must be a function or other callable object" ) if fit_deriv is not None and not callable(fit_deriv): raise ModelDefinitionError( "fit_deriv not callable; it must be a function or other callable object" ) model_name = func.__name__ inputs, special_params, settable_params, params = _custom_model_inputs(func) if fit_deriv is not None and len(fit_deriv.__defaults__) != len(params): raise ModelDefinitionError( "derivative function should accept same number of parameters as func." 
) params = { param: Parameter(param, default=default) for param, default in params.items() } mod = find_current_module(2) if mod: modname = mod.__name__ else: modname = "__main__" members = { "__module__": str(modname), "__doc__": func.__doc__, "n_inputs": len(inputs), "n_outputs": special_params.pop("n_outputs", 1), "evaluate": staticmethod(func), "_settable_properties": settable_params, } if fit_deriv is not None: members["fit_deriv"] = staticmethod(fit_deriv) members.update(params) cls = type(model_name, (FittableModel,), members) cls._separable = len(inputs) == 1 return cls def render_model(model, arr=None, coords=None): """ Evaluates a model on an input array. Evaluation is limited to a bounding box if the `Model.bounding_box` attribute is set. Parameters ---------- model : `Model` Model to be evaluated. arr : `numpy.ndarray`, optional Array on which the model is evaluated. coords : array-like, optional Coordinate arrays mapping to ``arr``, such that ``arr[coords] == arr``. Returns ------- array : `numpy.ndarray` The model evaluated on the input ``arr`` or a new array from ``coords``. If ``arr`` and ``coords`` are both `None`, the returned array is limited to the `Model.bounding_box` limits. If `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed. Examples -------- :ref:`astropy:bounding-boxes` """ bbox = model.bounding_box if (coords is None) & (arr is None) & (bbox is None): raise ValueError("If no bounding_box is set, coords or arr must be input.") # for consistent indexing if model.n_inputs == 1: if coords is not None: coords = [coords] if bbox is not None: bbox = [bbox] if arr is not None: arr = arr.copy() # Check dimensions match model if arr.ndim != model.n_inputs: raise ValueError( "number of array dimensions inconsistent with number of model inputs." ) if coords is not None: # Check dimensions match arr and model coords = np.array(coords) if len(coords) != model.n_inputs: raise ValueError( "coordinate length inconsistent with the number of model inputs." ) if arr is not None: if coords[0].shape != arr.shape: raise ValueError("coordinate shape inconsistent with the array shape.") else: arr = np.zeros(coords[0].shape) if bbox is not None: # assures position is at center pixel, important when using add_array pd = pos, delta = ( np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2)) for bb in bbox]) .astype(int) .T ) if coords is not None: sub_shape = tuple(delta * 2 + 1) sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords]) else: limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T] sub_coords = np.mgrid[limits] sub_coords = sub_coords[::-1] if arr is None: arr = model(*sub_coords) else: try: arr = add_array(arr, model(*sub_coords), pos) except ValueError: raise ValueError( "The `bounding_box` is larger than the input" " arr in one or more dimensions. Set " "`model.bounding_box = None`." ) else: if coords is None: im_shape = arr.shape limits = [slice(i) for i in im_shape] coords = np.mgrid[limits] arr += model(*coords[::-1]) return arr def hide_inverse(model): """ This is a convenience function intended to disable automatic generation of the inverse in compound models by disabling one of the constituent model's inverse. This is to handle cases where user provided inverse functions are not compatible within an expression. 
For example:: compound_model.inverse = hide_inverse(m1) + m2 + m3 This will insure that the defined inverse itself won't attempt to build its own inverse, which would otherwise fail in this example (e.g., m = m1 + m2 + m3 happens to raises an exception for this reason.) Note that this permanently disables it. To prevent that either copy the model or restore the inverse later. """ del model.inverse return model
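

# Illustrative sketch (comments only, not executed) of how the helpers defined
# above are typically combined; the model choice and values are arbitrary:
#
#     from astropy.modeling.models import Gaussian2D, Shift
#
#     g = Gaussian2D(1, 2, 3, 4, 5)
#     compound = (Shift(1) & Shift(2)) | g      # CompoundModel built with operators
#     sliced = fix_inputs(g, {"y": 3.0})        # 1D model with the y input frozen
#     bind_bounding_box(sliced, (-5.0, 5.0))    # attach a validated bounding box on x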
""" Special models useful for complex compound models where control is needed over which outputs from a source model are mapped to which inputs of a target model. """ # pylint: disable=invalid-name from astropy.units import Quantity from .core import FittableModel, Model __all__ = ["Mapping", "Identity", "UnitsMapping"] class Mapping(FittableModel): """ Allows inputs to be reordered, duplicated or dropped. Parameters ---------- mapping : tuple A tuple of integers representing indices of the inputs to this model to return and in what order to return them. See :ref:`astropy:compound-model-mappings` for more details. n_inputs : int Number of inputs; if `None` (default) then ``max(mapping) + 1`` is used (i.e. the highest input index used in the mapping). name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like Free-form metadata to associate with this model. Raises ------ TypeError Raised when number of inputs is less that ``max(mapping)``. Examples -------- >>> from astropy.modeling.models import Polynomial2D, Shift, Mapping >>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3) >>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1) >>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2) >>> model(1, 2) # doctest: +FLOAT_CMP (17.0, 14.2) """ linear = True # FittableModel is non-linear by default def __init__(self, mapping, n_inputs=None, name=None, meta=None): self._inputs = () self._outputs = () if n_inputs is None: self._n_inputs = max(mapping) + 1 else: self._n_inputs = n_inputs self._n_outputs = len(mapping) super().__init__(name=name, meta=meta) self.inputs = tuple("x" + str(idx) for idx in range(self._n_inputs)) self.outputs = tuple("x" + str(idx) for idx in range(self._n_outputs)) self._mapping = mapping self._input_units_strict = {key: False for key in self._inputs} self._input_units_allow_dimensionless = {key: False for key in self._inputs} @property def n_inputs(self): return self._n_inputs @property def n_outputs(self): return self._n_outputs @property def mapping(self): """Integers representing indices of the inputs.""" return self._mapping def __repr__(self): if self.name is None: return f"<Mapping({self.mapping})>" return f"<Mapping({self.mapping}, name={self.name!r})>" def evaluate(self, *args): if len(args) != self.n_inputs: name = self.name if self.name is not None else "Mapping" raise TypeError(f"{name} expects {self.n_inputs} inputs; got {len(args)}") result = tuple(args[idx] for idx in self._mapping) if self.n_outputs == 1: return result[0] return result @property def inverse(self): """ A `Mapping` representing the inverse of the current mapping. Raises ------ `NotImplementedError` An inverse does no exist on mappings that drop some of its inputs (there is then no way to reconstruct the inputs that were dropped). """ try: mapping = tuple(self.mapping.index(idx) for idx in range(self.n_inputs)) except ValueError: raise NotImplementedError( f"Mappings such as {self.mapping} that drop one or more of their inputs" " are not invertible at this time." ) inv = self.__class__(mapping) inv._inputs = self._outputs inv._outputs = self._inputs inv._n_inputs = len(inv._inputs) inv._n_outputs = len(inv._outputs) return inv class Identity(Mapping): """ Returns inputs unchanged. This class is useful in compound models when some of the inputs must be passed unchanged to the next model. 
Parameters ---------- n_inputs : int Specifies the number of inputs this identity model accepts. name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like Free-form metadata to associate with this model. Examples -------- Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs:: >>> from astropy.modeling.models import (Polynomial1D, Shift, Scale, ... Identity) >>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2) >>> model(1,1) # doctest: +FLOAT_CMP (2.4, 2.0) >>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP (1.0, 1.0) """ linear = True # FittableModel is non-linear by default def __init__(self, n_inputs, name=None, meta=None): mapping = tuple(range(n_inputs)) super().__init__(mapping, name=name, meta=meta) def __repr__(self): if self.name is None: return f"<Identity({self.n_inputs})>" return f"<Identity({self.n_inputs}, name={self.name!r})>" @property def inverse(self): """ The inverse transformation. In this case of `Identity`, ``self.inverse is self``. """ return self class UnitsMapping(Model): """ Mapper that operates on the units of the input, first converting to canonical units, then assigning new units without further conversion. Used by Model.coerce_units to support units on otherwise unitless models such as Polynomial1D. Parameters ---------- mapping : tuple A tuple of (input_unit, output_unit) pairs, one per input, matched to the inputs by position. The first element of the each pair is the unit that the model will accept (specify ``dimensionless_unscaled`` to accept dimensionless input). The second element is the unit that the model will return. Specify ``dimensionless_unscaled`` to return dimensionless Quantity, and `None` to return raw values without Quantity. input_units_equivalencies : dict, optional Default equivalencies to apply to input values. If set, this should be a dictionary where each key is a string that corresponds to one of the model inputs. input_units_allow_dimensionless : dict or bool, optional Allow dimensionless input. If this is True, input values to evaluate will gain the units specified in input_units. If this is a dictionary then it should map input name to a bool to allow dimensionless numbers for that input. name : str, optional A human-friendly name associated with this model instance (particularly useful for identifying the individual components of a compound model). meta : dict-like, optional Free-form metadata to associate with this model. Examples -------- Wrapping a unitless model to require and convert units: >>> from astropy.modeling.models import Polynomial1D, UnitsMapping >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = UnitsMapping(((u.m, None),)) | poly >>> model = model | UnitsMapping(((None, u.s),)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP <Quantity 1.2 s> Wrapping a unitless model but still permitting unitless input: >>> from astropy.modeling.models import Polynomial1D, UnitsMapping >>> from astropy import units as u >>> poly = Polynomial1D(1, c0=1, c1=2) >>> model = UnitsMapping(((u.m, None),), input_units_allow_dimensionless=True) | poly >>> model = model | UnitsMapping(((None, u.s),)) >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP <Quantity 21. s> >>> model(10) # doctest: +FLOAT_CMP <Quantity 21. 
s> """ def __init__( self, mapping, input_units_equivalencies=None, input_units_allow_dimensionless=False, name=None, meta=None, ): self._mapping = mapping none_mapping_count = len([m for m in mapping if m[-1] is None]) if none_mapping_count > 0 and none_mapping_count != len(mapping): raise ValueError("If one return unit is None, then all must be None") # These attributes are read and handled by Model self._input_units_strict = True self.input_units_equivalencies = input_units_equivalencies self._input_units_allow_dimensionless = input_units_allow_dimensionless super().__init__(name=name, meta=meta) # Can't invoke this until after super().__init__, since # we need self.inputs and self.outputs to be populated. self._rebuild_units() def _rebuild_units(self): self._input_units = { input_name: input_unit for input_name, (input_unit, _) in zip(self.inputs, self.mapping) } @property def n_inputs(self): return len(self._mapping) @property def n_outputs(self): return len(self._mapping) @property def inputs(self): return super().inputs @inputs.setter def inputs(self, value): super(UnitsMapping, self.__class__).inputs.fset(self, value) self._rebuild_units() @property def outputs(self): return super().outputs @outputs.setter def outputs(self, value): super(UnitsMapping, self.__class__).outputs.fset(self, value) self._rebuild_units() @property def input_units(self): return self._input_units @property def mapping(self): return self._mapping def evaluate(self, *args): result = [] for arg, (_, return_unit) in zip(args, self.mapping): if isinstance(arg, Quantity): value = arg.value else: value = arg if return_unit is None: result.append(value) else: result.append(Quantity(value, return_unit, subok=True)) if self.n_outputs == 1: return result[0] else: return tuple(result) def __repr__(self): if self.name is None: return f"<UnitsMapping({self.mapping})>" else: return f"<UnitsMapping({self.mapping}, name={self.name!r})>"
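

# Illustrative sketch (comments only, not executed): a Mapping that merely
# reorders its inputs is invertible, while one that drops an input is not.
#
#     from astropy.modeling.models import Mapping
#
#     swap = Mapping((1, 0))            # (x, y) -> (y, x)
#     swap.inverse                      # another Mapping((1, 0))
#     drop = Mapping((0,), n_inputs=2)  # (x, y) -> (x,)
#     drop.inverse                      # raises NotImplementedError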
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Functions to determine if a model is separable, i.e. if the model outputs are independent. It analyzes ``n_inputs``, ``n_outputs`` and the operators in a compound model by stepping through the transforms and creating a ``coord_matrix`` of shape (``n_outputs``, ``n_inputs``). Each modeling operator is represented by a function which takes two simple models (or two ``coord_matrix`` arrays) and returns an array of shape (``n_outputs``, ``n_inputs``). """ import numpy as np from .core import CompoundModel, Model, ModelDefinitionError from .mappings import Mapping __all__ = ["is_separable", "separability_matrix"] def is_separable(transform): """ A separability test for the outputs of a transform. Parameters ---------- transform : `~astropy.modeling.core.Model` A (compound) model. Returns ------- is_separable : ndarray A boolean array with size ``transform.n_outputs`` where each element indicates whether the output is independent and the result of a separable transform. Examples -------- >>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D >>> is_separable(Shift(1) & Shift(2) | Scale(1) & Scale(2)) array([ True, True]...) >>> is_separable(Shift(1) & Shift(2) | Rotation2D(2)) array([False, False]...) >>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \ Polynomial2D(1) & Polynomial2D(2)) array([False, False]...) >>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1])) array([ True, True, True, True]...) """ if transform.n_inputs == 1 and transform.n_outputs > 1: is_separable = np.array([False] * transform.n_outputs).T return is_separable separable_matrix = _separable(transform) is_separable = separable_matrix.sum(1) is_separable = np.where(is_separable != 1, False, True) return is_separable def separability_matrix(transform): """ Compute the correlation between outputs and inputs. Parameters ---------- transform : `~astropy.modeling.core.Model` A (compound) model. Returns ------- separable_matrix : ndarray A boolean correlation matrix of shape (n_outputs, n_inputs). Indicates the dependence of outputs on inputs. For completely independent outputs, the diagonal elements are True and off-diagonal elements are False. Examples -------- >>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D >>> separability_matrix(Shift(1) & Shift(2) | Scale(1) & Scale(2)) array([[ True, False], [False, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Rotation2D(2)) array([[ True, True], [ True, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \ Polynomial2D(1) & Polynomial2D(2)) array([[ True, True], [ True, True]]...) >>> separability_matrix(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1])) array([[ True, False], [False, True], [ True, False], [False, True]]...) """ if transform.n_inputs == 1 and transform.n_outputs > 1: return np.ones((transform.n_outputs, transform.n_inputs), dtype=np.bool_) separable_matrix = _separable(transform) separable_matrix = np.where(separable_matrix != 0, True, False) return separable_matrix def _compute_n_outputs(left, right): """ Compute the number of outputs of two models. The two models are the left and right model to an operation in the expression tree of a compound model. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. 
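
    Returns
    -------
    noutp : int
        The summed number of outputs (or ``coord_matrix`` rows) of the two
        operands.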
""" if isinstance(left, Model): lnout = left.n_outputs else: lnout = left.shape[0] if isinstance(right, Model): rnout = right.n_outputs else: rnout = right.shape[0] noutp = lnout + rnout return noutp def _arith_oper(left, right): """ Function corresponding to one of the arithmetic operators ['+', '-'. '*', '/', '**']. This always returns a nonseparable output. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ def _n_inputs_outputs(input): if isinstance(input, Model): # Models have the same number of inputs and outputs. n_outputs, n_inputs = input.n_outputs, input.n_inputs else: n_outputs, n_inputs = input.shape return n_inputs, n_outputs left_inputs, left_outputs = _n_inputs_outputs(left) right_inputs, right_outputs = _n_inputs_outputs(right) if left_inputs != right_inputs or left_outputs != right_outputs: raise ModelDefinitionError( "Unsupported operands for arithmetic operator: left" f" (n_inputs={left_inputs}, n_outputs={left_outputs}) and right" f" (n_inputs={right_inputs}, n_outputs={right_outputs}); models must have" " the same n_inputs and the same n_outputs for this operator." ) result = np.ones((left_outputs, left_inputs)) return result def _coord_matrix(model, pos, noutp): """ Create an array representing inputs and outputs of a simple model. The array has a shape (noutp, model.n_inputs). Parameters ---------- model : `astropy.modeling.Model` model pos : str Position of this model in the expression tree. One of ['left', 'right']. noutp : int Number of outputs of the compound model of which the input model is a left or right child. """ if isinstance(model, Mapping): axes = [] for i in model.mapping: axis = np.zeros((model.n_inputs,)) axis[i] = 1 axes.append(axis) m = np.vstack(axes) mat = np.zeros((noutp, model.n_inputs)) if pos == "left": mat[: model.n_outputs, : model.n_inputs] = m else: mat[-model.n_outputs :, -model.n_inputs :] = m return mat if not model.separable: # this does not work for more than 2 coordinates mat = np.zeros((noutp, model.n_inputs)) if pos == "left": mat[: model.n_outputs, : model.n_inputs] = 1 else: mat[-model.n_outputs :, -model.n_inputs :] = 1 else: mat = np.zeros((noutp, model.n_inputs)) for i in range(model.n_inputs): mat[i, i] = 1 if pos == "right": mat = np.roll(mat, (noutp - model.n_outputs)) return mat def _cstack(left, right): """ Function corresponding to '&' operation. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ noutp = _compute_n_outputs(left, right) if isinstance(left, Model): cleft = _coord_matrix(left, "left", noutp) else: cleft = np.zeros((noutp, left.shape[1])) cleft[: left.shape[0], : left.shape[1]] = left if isinstance(right, Model): cright = _coord_matrix(right, "right", noutp) else: cright = np.zeros((noutp, right.shape[1])) cright[-right.shape[0] :, -right.shape[1] :] = right return np.hstack([cleft, cright]) def _cdot(left, right): """ Function corresponding to "|" operation. Parameters ---------- left, right : `astropy.modeling.Model` or ndarray If input is of an array, it is the output of `coord_matrix`. Returns ------- result : ndarray Result from this operation. """ left, right = right, left def _n_inputs_outputs(input, position): """ Return ``n_inputs``, ``n_outputs`` for a model or coord_matrix. 
""" if isinstance(input, Model): coords = _coord_matrix(input, position, input.n_outputs) else: coords = input return coords cleft = _n_inputs_outputs(left, "left") cright = _n_inputs_outputs(right, "right") try: result = np.dot(cleft, cright) except ValueError: raise ModelDefinitionError( 'Models cannot be combined with the "|" operator; ' f"left coord_matrix is {cright}, right coord_matrix is {cleft}" ) return result def _separable(transform): """ Calculate the separability of outputs. Parameters ---------- transform : `astropy.modeling.Model` A transform (usually a compound model). Returns : is_separable : ndarray of dtype np.bool An array of shape (transform.n_outputs,) of boolean type Each element represents the separablity of the corresponding output. """ if ( transform_matrix := transform._calculate_separability_matrix() ) is not NotImplemented: return transform_matrix elif isinstance(transform, CompoundModel): sepleft = _separable(transform.left) sepright = _separable(transform.right) return _operators[transform.op](sepleft, sepright) elif isinstance(transform, Model): return _coord_matrix(transform, "left", transform.n_outputs) # Maps modeling operators to a function computing and represents the # relationship of axes as an array of 0-es and 1-s _operators = { "&": _cstack, "|": _cdot, "+": _arith_oper, "-": _arith_oper, "*": _arith_oper, "/": _arith_oper, "**": _arith_oper, }
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module provides utility functions for the models package. """ import warnings # pylint: disable=invalid-name from collections import UserDict from collections.abc import MutableMapping from inspect import signature import numpy as np from astropy import units as u from astropy.utils.decorators import deprecated __doctest_skip__ = ["AliasDict"] __all__ = ["AliasDict", "poly_map_domain", "comb", "ellipse_extent"] deprecation_msg = """ AliasDict is deprecated because it no longer serves a function anywhere inside astropy. """ @deprecated("5.0", deprecation_msg) class AliasDict(MutableMapping): """ Creates a `dict` like object that wraps an existing `dict` or other `MutableMapping`, along with a `dict` of *key aliases* that translate between specific keys in this dict to different keys in the underlying dict. In other words, keys that do not have an associated alias are accessed and stored like a normal `dict`. However, a key that has an alias is accessed and stored to the "parent" dict via the alias. Parameters ---------- parent : dict-like The parent `dict` that aliased keys and accessed from and stored to. aliases : dict-like Maps keys in this dict to their associated keys in the parent dict. Examples -------- >>> parent = {'a': 1, 'b': 2, 'c': 3} >>> aliases = {'foo': 'a', 'bar': 'c'} >>> alias_dict = AliasDict(parent, aliases) >>> alias_dict['foo'] 1 >>> alias_dict['bar'] 3 Keys in the original parent dict are not visible if they were not aliased: >>> alias_dict['b'] Traceback (most recent call last): ... KeyError: 'b' Likewise, updates to aliased keys are reflected back in the parent dict: >>> alias_dict['foo'] = 42 >>> alias_dict['foo'] 42 >>> parent['a'] 42 However, updates/insertions to keys that are *not* aliased are not reflected in the parent dict: >>> alias_dict['qux'] = 99 >>> alias_dict['qux'] 99 >>> 'qux' in parent False In particular, updates on the `AliasDict` to a key that is equal to one of the aliased keys in the parent dict does *not* update the parent dict. For example, ``alias_dict`` aliases ``'foo'`` to ``'a'``. But assigning to a key ``'a'`` on the `AliasDict` does not impact the parent: >>> alias_dict['a'] = 'nope' >>> alias_dict['a'] 'nope' >>> parent['a'] 42 """ _store_type = dict """ Subclasses may override this to use other mapping types as the underlying storage, for example an `OrderedDict`. However, even in this case additional work may be needed to get things like the ordering right. """ def __init__(self, parent, aliases): self._parent = parent self._store = self._store_type() self._aliases = dict(aliases) def __getitem__(self, key): if key in self._aliases: try: return self._parent[self._aliases[key]] except KeyError: raise KeyError(key) return self._store[key] def __setitem__(self, key, value): if key in self._aliases: self._parent[self._aliases[key]] = value else: self._store[key] = value def __delitem__(self, key): if key in self._aliases: try: del self._parent[self._aliases[key]] except KeyError: raise KeyError(key) else: del self._store[key] def __iter__(self): """ First iterates over keys from the parent dict (if the aliased keys are present in the parent), followed by any keys in the local store. 
""" for key, alias in self._aliases.items(): if alias in self._parent: yield key for key in self._store: yield key def __len__(self): return len(list(iter(self))) def __repr__(self): # repr() just like any other dict--this should look transparent store_copy = self._store_type() for key, alias in self._aliases.items(): if alias in self._parent: store_copy[key] = self._parent[alias] store_copy.update(self._store) return repr(store_copy) def make_binary_operator_eval(oper, f, g): """ Given a binary operator (as a callable of two arguments) ``oper`` and two callables ``f`` and ``g`` which accept the same arguments, returns a *new* function that takes the same arguments as ``f`` and ``g``, but passes the outputs of ``f`` and ``g`` in the given ``oper``. ``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The given operator is applied element-wise to tuple outputs). Example ------- >>> from operator import add >>> def prod(x, y): ... return (x * y,) ... >>> sum_of_prod = make_binary_operator_eval(add, prod, prod) >>> sum_of_prod(3, 5) (30,) """ return lambda inputs, params: tuple( oper(x, y) for x, y in zip(f(inputs, params), g(inputs, params)) ) def poly_map_domain(oldx, domain, window): """ Map domain into window by shifting and scaling. Parameters ---------- oldx : array original coordinates domain : list or tuple of length 2 function domain window : list or tuple of length 2 range into which to map the domain """ domain = np.array(domain, dtype=np.float64) window = np.array(window, dtype=np.float64) if domain.shape != (2,) or window.shape != (2,): raise ValueError('Expected "domain" and "window" to be a tuple of size 2.') scl = (window[1] - window[0]) / (domain[1] - domain[0]) off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0]) return off + scl * oldx def _validate_domain_window(value): if value is not None: if np.asanyarray(value).shape != (2,): raise ValueError("domain and window should be tuples of size 2.") return tuple(value) return value @deprecated("5.3", alternative="math.comb") def comb(N, k): """ The number of combinations of N things taken k at a time. Parameters ---------- N : int, array Number of things. k : int, array Number of elements taken. """ if (k > N) or (N < 0) or (k < 0): return 0 val = 1 for j in range(min(k, N - k)): val = (val * (N - j)) / (j + 1) return val def array_repr_oneline(array): """ Represents a multi-dimensional Numpy array flattened onto a single line. """ r = np.array2string(array, separator=", ", suppress_small=True) return " ".join(line.strip() for line in r.splitlines()) def combine_labels(left, right): """ For use with the join operator &: Combine left input/output labels with right input/output labels. If none of the labels conflict then this just returns a sum of tuples. However if *any* of the labels conflict, this appends '0' to the left-hand labels and '1' to the right-hand labels so there is no ambiguity). """ if set(left).intersection(right): left = tuple(label + "0" for label in left) right = tuple(label + "1" for label in right) return left + right def ellipse_extent(a, b, theta): """ Calculates the half size of a box encapsulating a rotated 2D ellipse. Parameters ---------- a : float or `~astropy.units.Quantity` The ellipse semimajor axis. b : float or `~astropy.units.Quantity` The ellipse semiminor axis. theta : float or `~astropy.units.Quantity` ['angle'] The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). 
The rotation angle increases counterclockwise. Returns ------- offsets : tuple The absolute value of the offset distances from the ellipse center that define its bounding box region, ``(dx, dy)``. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Ellipse2D from astropy.modeling.utils import ellipse_extent, render_model amplitude = 1 x0 = 50 y0 = 50 a = 30 b = 10 theta = np.pi / 4 model = Ellipse2D(amplitude, x0, y0, a, b, theta) dx, dy = ellipse_extent(a, b, theta) limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy] model.bounding_box = limits image = render_model(model) plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5, extent = limits) plt.show() """ from .parameters import Parameter # prevent circular import if isinstance(theta, Parameter): if theta.quantity is None: theta = theta.value else: theta = theta.quantity t = np.arctan2(-b * np.tan(theta), a) dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta) t = np.arctan2(b, a * np.tan(theta)) dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta) if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity): return np.abs(u.Quantity([dx, dy], subok=True)) return np.abs([dx, dy]) def get_inputs_and_params(func): """ Given a callable, determine the input variables and the parameters. Parameters ---------- func : callable Returns ------- inputs, params : tuple Each entry is a list of inspect.Parameter objects """ sig = signature(func) inputs = [] params = [] for param in sig.parameters.values(): if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD): raise ValueError("Signature must not have *args or **kwargs") if param.default == param.empty: inputs.append(param) else: params.append(param) return inputs, params def _combine_equivalency_dict(keys, eq1=None, eq2=None): # Given two dictionaries that give equivalencies for a set of keys, for # example input value names, return a dictionary that includes all the # equivalencies eq = {} for key in keys: eq[key] = [] if eq1 is not None and key in eq1: eq[key].extend(eq1[key]) if eq2 is not None and key in eq2: eq[key].extend(eq2[key]) return eq def _to_radian(value): """Convert ``value`` to radian.""" if isinstance(value, u.Quantity): return value.to(u.rad) return np.deg2rad(value) def _to_orig_unit(value, raw_unit=None, orig_unit=None): """Convert value with ``raw_unit`` to ``orig_unit``.""" if raw_unit is not None: return (value * raw_unit).to(orig_unit) return np.rad2deg(value) class _ConstraintsDict(UserDict): """ Wrapper around UserDict to allow updating the constraints on a Parameter when the dictionary is updated. """ def __init__(self, model, constraint_type): self._model = model self.constraint_type = constraint_type c = {} for name in model.param_names: param = getattr(model, name) c[name] = getattr(param, constraint_type) super().__init__(c) def __setitem__(self, key, val): super().__setitem__(key, val) param = getattr(self._model, key) setattr(param, self.constraint_type, val) class _SpecialOperatorsDict(UserDict): """ Wrapper around UserDict to allow for better tracking of the Special Operators for CompoundModels. This dictionary is structured so that one cannot inadvertently overwrite an existing special operator. 
Parameters ---------- unique_id: int the last used unique_id for a SPECIAL OPERATOR special_operators: dict a dictionary containing the special_operators Notes ----- Direct setting of operators (`dict[key] = value`) into the dictionary has been deprecated in favor of the `.add(name, value)` method, so that unique dictionary keys can be generated and tracked consistently. """ def __init__(self, unique_id=0, special_operators={}): super().__init__(special_operators) self._unique_id = unique_id def _set_value(self, key, val): if key in self: raise ValueError(f'Special operator "{key}" already exists') else: super().__setitem__(key, val) def __setitem__(self, key, val): self._set_value(key, val) warnings.warn( DeprecationWarning( """ Special operator dictionary assignment has been deprecated. Please use `.add` instead, so that you can capture a unique key for your operator. """ ) ) def _get_unique_id(self): self._unique_id += 1 return self._unique_id def add(self, operator_name, operator): """ Adds a special operator to the dictionary, and then returns the unique key that the operator is stored under for later reference. Parameters ---------- operator_name: str the name for the operator operator: function the actual operator function which will be used Returns ------- the unique operator key for the dictionary `(operator_name, unique_id)` """ key = (operator_name, self._get_unique_id()) self._set_value(key, operator) return key
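

# A minimal usage sketch for ``_SpecialOperatorsDict.add`` (the operator name
# and function below are illustrative, not part of astropy):
#
#     >>> ops = _SpecialOperatorsDict()
#     >>> key = ops.add("my_op", lambda left, right: left + right)
#     >>> key   # the given name plus a generated unique id
#     ('my_op', 1)
#     >>> callable(ops[key])
#     True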
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Mathematical models.""" # pylint: disable=line-too-long, too-many-lines, too-many-arguments, invalid-name import warnings import numpy as np from astropy import units as u from astropy.units import Quantity, UnitsError from astropy.utils.compat.optional_deps import HAS_SCIPY from astropy.utils.exceptions import AstropyDeprecationWarning from .core import Fittable1DModel, Fittable2DModel from .parameters import InputParameterError, Parameter from .utils import ellipse_extent __all__ = [ "AiryDisk2D", "Moffat1D", "Moffat2D", "Box1D", "Box2D", "Const1D", "Const2D", "Ellipse2D", "Disk2D", "Gaussian1D", "Gaussian2D", "Linear1D", "Lorentz1D", "RickerWavelet1D", "RickerWavelet2D", "RedshiftScaleFactor", "Multiply", "Planar2D", "Scale", "Sersic1D", "Sersic2D", "Shift", "Sine1D", "Cosine1D", "Tangent1D", "ArcSine1D", "ArcCosine1D", "ArcTangent1D", "Trapezoid1D", "TrapezoidDisk2D", "Ring2D", "Voigt1D", "KingProjectedAnalytic1D", "Exponential1D", "Logarithmic1D", ] TWOPI = 2 * np.pi FLOAT_EPSILON = float(np.finfo(np.float32).tiny) # Note that we define this here rather than using the value defined in # astropy.stats to avoid importing astropy.stats every time astropy.modeling # is loaded. GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0)) class Gaussian1D(Fittable1DModel): """ One dimensional Gaussian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Amplitude (peak value) of the Gaussian - for a normalized profile (integrating to 1), set amplitude = 1 / (stddev * np.sqrt(2 * np.pi)) mean : float or `~astropy.units.Quantity`. Mean of the Gaussian. stddev : float or `~astropy.units.Quantity`. Standard deviation of the Gaussian with FWHM = 2 * stddev * np.sqrt(2 * np.log(2)). Notes ----- Either all or none of input ``x``, ``mean`` and ``stddev`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}} Examples -------- >>> from astropy.modeling import models >>> def tie_center(model): ... mean = 50 * model.stddev ... return mean >>> tied_parameters = {'mean': tie_center} Specify that 'mean' is a tied parameter in one of two ways: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... tied=tied_parameters) or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.mean.tied False >>> g1.mean.tied = tie_center >>> g1.mean.tied <function tie_center at 0x...> Fixed parameters: >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3, ... fixed={'stddev': True}) >>> g1.stddev.fixed True or >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3) >>> g1.stddev.fixed False >>> g1.stddev.fixed = True >>> g1.stddev.fixed True .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Gaussian1D plt.figure() s1 = Gaussian1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() See Also -------- Gaussian2D, Box1D, Moffat1D, Lorentz1D """ amplitude = Parameter( default=1, description="Amplitude (peak value) of the Gaussian" ) mean = Parameter(default=0, description="Position of peak (Gaussian)") # Ensure stddev makes sense if its bounds are not explicitly set. # stddev must be non-zero and positive. 
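    # FLOAT_EPSILON (the smallest positive normal float32) is used as the
    # lower bound so a fitter can never drive stddev to exactly zero, which
    # would divide by zero in ``evaluate``.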
stddev = Parameter( default=1, bounds=(FLOAT_EPSILON, None), description="Standard deviation of the Gaussian", ) def bounding_box(self, factor=5.5): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of `stddev` used to define the limits. The default is 5.5, corresponding to a relative error < 1e-7. Examples -------- >>> from astropy.modeling.models import Gaussian1D >>> model = Gaussian1D(mean=0, stddev=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-11.0, upper=11.0) } model=Gaussian1D(inputs=('x',)) order='C' ) This range can be set directly (see: `Model.bounding_box <astropy.modeling.Model.bounding_box>`) or by using a different factor, like: >>> model.bounding_box = model.bounding_box(factor=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-4.0, upper=4.0) } model=Gaussian1D(inputs=('x',)) order='C' ) """ x0 = self.mean dx = factor * self.stddev return (x0 - dx, x0 + dx) @property def fwhm(self): """Gaussian full width at half maximum.""" return self.stddev * GAUSSIAN_SIGMA_TO_FWHM @staticmethod def evaluate(x, amplitude, mean, stddev): """ Gaussian1D model function. """ return amplitude * np.exp(-0.5 * (x - mean) ** 2 / stddev**2) @staticmethod def fit_deriv(x, amplitude, mean, stddev): """ Gaussian1D model function derivatives. """ d_amplitude = np.exp(-0.5 / stddev**2 * (x - mean) ** 2) d_mean = amplitude * d_amplitude * (x - mean) / stddev**2 d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev**3 return [d_amplitude, d_mean, d_stddev] @property def input_units(self): if self.mean.unit is None: return None return {self.inputs[0]: self.mean.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "mean": inputs_unit[self.inputs[0]], "stddev": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Gaussian2D(Fittable2DModel): r""" Two dimensional Gaussian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Amplitude (peak value) of the Gaussian. x_mean : float or `~astropy.units.Quantity`. Mean of the Gaussian in x. y_mean : float or `~astropy.units.Quantity`. Mean of the Gaussian in y. x_stddev : float or `~astropy.units.Quantity` or None. Standard deviation of the Gaussian in x before rotating by theta. Must be None if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, ``None`` means the default value (1). y_stddev : float or `~astropy.units.Quantity` or None. Standard deviation of the Gaussian in y before rotating by theta. Must be None if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, ``None`` means the default value (1). theta : float or `~astropy.units.Quantity`, optional. The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise. Must be `None` if a covariance matrix (``cov_matrix``) is provided. If no ``cov_matrix`` is given, `None` means the default value (0). cov_matrix : ndarray, optional A 2x2 covariance matrix. If specified, overrides the ``x_stddev``, ``y_stddev``, and ``theta`` defaults. Notes ----- Either all or none of input ``x, y``, ``[x,y]_mean`` and ``[x,y]_stddev`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. 
math:: f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right) \left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}} Using the following definitions: .. math:: a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} + \frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right) b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} - \frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right) c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} + \frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right) If using a ``cov_matrix``, the model is of the form: .. math:: f(x, y) = A e^{-0.5 \left( \vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0} \right)} where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`, and :math:`\Sigma` is the covariance matrix: .. math:: \Sigma = \left(\begin{array}{ccc} \sigma_x^2 & \rho \sigma_x \sigma_y \\ \rho \sigma_x \sigma_y & \sigma_y^2 \end{array}\right) :math:`\rho` is the correlation between ``x`` and ``y``, which should be between -1 and +1. Positive correlation corresponds to a ``theta`` in the range 0 to 90 degrees. Negative correlation corresponds to a ``theta`` in the range of 0 to -90 degrees. See [1]_ for more details about the 2D Gaussian function. See Also -------- Gaussian1D, Box2D, Moffat2D References ---------- .. [1] https://en.wikipedia.org/wiki/Gaussian_function """ amplitude = Parameter(default=1, description="Amplitude of the Gaussian") x_mean = Parameter( default=0, description="Peak position (along x axis) of Gaussian" ) y_mean = Parameter( default=0, description="Peak position (along y axis) of Gaussian" ) x_stddev = Parameter( default=1, description="Standard deviation of the Gaussian (along x axis)" ) y_stddev = Parameter( default=1, description="Standard deviation of the Gaussian (along y axis)" ) theta = Parameter( default=0.0, description=( "Rotation angle either as a " "float (in radians) or a " "|Quantity| angle (optional)" ), ) def __init__( self, amplitude=amplitude.default, x_mean=x_mean.default, y_mean=y_mean.default, x_stddev=None, y_stddev=None, theta=None, cov_matrix=None, **kwargs, ): if cov_matrix is None: if x_stddev is None: x_stddev = self.__class__.x_stddev.default if y_stddev is None: y_stddev = self.__class__.y_stddev.default if theta is None: theta = self.__class__.theta.default else: if x_stddev is not None or y_stddev is not None or theta is not None: raise InputParameterError( "Cannot specify both cov_matrix and x/y_stddev/theta" ) # Compute principle coordinate system transformation cov_matrix = np.array(cov_matrix) if cov_matrix.shape != (2, 2): raise ValueError("Covariance matrix must be 2x2") eig_vals, eig_vecs = np.linalg.eig(cov_matrix) x_stddev, y_stddev = np.sqrt(eig_vals) y_vec = eig_vecs[:, 0] theta = np.arctan2(y_vec[1], y_vec[0]) # Ensure stddev makes sense if its bounds are not explicitly set. # stddev must be non-zero and positive. # TODO: Investigate why setting this in Parameter above causes # convolution tests to hang. 
kwargs.setdefault("bounds", {}) kwargs["bounds"].setdefault("x_stddev", (FLOAT_EPSILON, None)) kwargs["bounds"].setdefault("y_stddev", (FLOAT_EPSILON, None)) super().__init__( amplitude=amplitude, x_mean=x_mean, y_mean=y_mean, x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs, ) @property def x_fwhm(self): """Gaussian full width at half maximum in X.""" return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM @property def y_fwhm(self): """Gaussian full width at half maximum in Y.""" return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM def bounding_box(self, factor=5.5): """ Tuple defining the default ``bounding_box`` limits in each dimension, ``((y_low, y_high), (x_low, x_high))``. The default offset from the mean is 5.5-sigma, corresponding to a relative error < 1e-7. The limits are adjusted for rotation. Parameters ---------- factor : float, optional The multiple of `x_stddev` and `y_stddev` used to define the limits. The default is 5.5. Examples -------- >>> from astropy.modeling.models import Gaussian2D >>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-5.5, upper=5.5) y: Interval(lower=-11.0, upper=11.0) } model=Gaussian2D(inputs=('x', 'y')) order='C' ) This range can be set directly (see: `Model.bounding_box <astropy.modeling.Model.bounding_box>`) or by using a different factor like: >>> model.bounding_box = model.bounding_box(factor=2) >>> model.bounding_box ModelBoundingBox( intervals={ x: Interval(lower=-2.0, upper=2.0) y: Interval(lower=-4.0, upper=4.0) } model=Gaussian2D(inputs=('x', 'y')) order='C' ) """ a = factor * self.x_stddev b = factor * self.y_stddev dx, dy = ellipse_extent(a, b, self.theta) return ( (self.y_mean - dy, self.y_mean + dy), (self.x_mean - dx, self.x_mean + dx), ) @staticmethod def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta): """Two dimensional Gaussian function.""" cost2 = np.cos(theta) ** 2 sint2 = np.sin(theta) ** 2 sin2t = np.sin(2.0 * theta) xstd2 = x_stddev**2 ystd2 = y_stddev**2 xdiff = x - x_mean ydiff = y - y_mean a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2)) b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2)) c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2)) return amplitude * np.exp( -((a * xdiff**2) + (b * xdiff * ydiff) + (c * ydiff**2)) ) @staticmethod def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta): """Two dimensional Gaussian function derivative with respect to parameters.""" cost = np.cos(theta) sint = np.sin(theta) cost2 = np.cos(theta) ** 2 sint2 = np.sin(theta) ** 2 cos2t = np.cos(2.0 * theta) sin2t = np.sin(2.0 * theta) xstd2 = x_stddev**2 ystd2 = y_stddev**2 xstd3 = x_stddev**3 ystd3 = y_stddev**3 xdiff = x - x_mean ydiff = y - y_mean xdiff2 = xdiff**2 ydiff2 = ydiff**2 a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2)) b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2)) c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2)) g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) + (c * ydiff2))) da_dtheta = sint * cost * ((1.0 / ystd2) - (1.0 / xstd2)) da_dx_stddev = -cost2 / xstd3 da_dy_stddev = -sint2 / ystd3 db_dtheta = (cos2t / xstd2) - (cos2t / ystd2) db_dx_stddev = -sin2t / xstd3 db_dy_stddev = sin2t / ystd3 dc_dtheta = -da_dtheta dc_dx_stddev = -sint2 / xstd3 dc_dy_stddev = -cost2 / ystd3 dg_dA = g / amplitude dg_dx_mean = g * ((2.0 * a * xdiff) + (b * ydiff)) dg_dy_mean = g * ((b * xdiff) + (2.0 * c * ydiff)) dg_dx_stddev = g * ( -( da_dx_stddev * xdiff2 + db_dx_stddev * xdiff * ydiff + dc_dx_stddev * ydiff2 ) ) 
dg_dy_stddev = g * ( -( da_dy_stddev * xdiff2 + db_dy_stddev * xdiff * ydiff + dc_dy_stddev * ydiff2 ) ) dg_dtheta = g * ( -(da_dtheta * xdiff2 + db_dtheta * xdiff * ydiff + dc_dtheta * ydiff2) ) return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev, dg_dtheta] @property def input_units(self): if self.x_mean.unit is None and self.y_mean.unit is None: return None return {self.inputs[0]: self.x_mean.unit, self.inputs[1]: self.y_mean.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_mean": inputs_unit[self.inputs[0]], "y_mean": inputs_unit[self.inputs[0]], "x_stddev": inputs_unit[self.inputs[0]], "y_stddev": inputs_unit[self.inputs[0]], "theta": u.rad, "amplitude": outputs_unit[self.outputs[0]], } class Shift(Fittable1DModel): """ Shift a coordinate. Parameters ---------- offset : float Offset to add to a coordinate. """ offset = Parameter(default=0, description="Offset to add to a model") linear = True _has_inverse_bounding_box = True @property def input_units(self): if self.offset.unit is None: return None return {self.inputs[0]: self.offset.unit} @property def inverse(self): """One dimensional inverse Shift model function.""" inv = self.copy() inv.offset *= -1 try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple( self.evaluate(x, self.offset) for x in self.bounding_box ) return inv @staticmethod def evaluate(x, offset): """One dimensional Shift model function.""" return x + offset @staticmethod def sum_of_implicit_terms(x): """Evaluate the implicit term (x) of one dimensional Shift model.""" return x @staticmethod def fit_deriv(x, *params): """One dimensional Shift model derivative with respect to parameter.""" d_offset = np.ones_like(x) return [d_offset] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"offset": outputs_unit[self.outputs[0]]} class Scale(Fittable1DModel): """ Multiply a model by a dimensionless factor. Parameters ---------- factor : float Factor by which to scale a coordinate. Notes ----- If ``factor`` is a `~astropy.units.Quantity` then the units will be stripped before the scaling operation. 
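
    Examples
    --------
    A minimal sketch; the scalar output is shown indicatively:

    >>> from astropy.modeling.models import Scale
    >>> Scale(3)(2.0)    # doctest: +SKIP
    6.0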
""" factor = Parameter(default=1, description="Factor by which to scale a model") linear = True fittable = True _input_units_strict = True _input_units_allow_dimensionless = True _has_inverse_bounding_box = True @property def input_units(self): if self.factor.unit is None: return None return {self.inputs[0]: self.factor.unit} @property def inverse(self): """One dimensional inverse Scale model function.""" inv = self.copy() inv.factor = 1 / self.factor try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple( self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box() ) return inv @staticmethod def evaluate(x, factor): """One dimensional Scale model function.""" if isinstance(factor, u.Quantity): factor = factor.value return factor * x @staticmethod def fit_deriv(x, *params): """One dimensional Scale model derivative with respect to parameter.""" d_factor = x return [d_factor] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"factor": outputs_unit[self.outputs[0]]} class Multiply(Fittable1DModel): """ Multiply a model by a quantity or number. Parameters ---------- factor : float Factor by which to multiply a coordinate. """ factor = Parameter(default=1, description="Factor by which to multiply a model") linear = True fittable = True _has_inverse_bounding_box = True @property def inverse(self): """One dimensional inverse multiply model function.""" inv = self.copy() inv.factor = 1 / self.factor try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple( self.evaluate(x, self.factor) for x in self.bounding_box.bounding_box() ) return inv @staticmethod def evaluate(x, factor): """One dimensional multiply model function.""" return factor * x @staticmethod def fit_deriv(x, *params): """One dimensional multiply model derivative with respect to parameter.""" d_factor = x return [d_factor] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"factor": outputs_unit[self.outputs[0]]} class RedshiftScaleFactor(Fittable1DModel): """ One dimensional redshift scale factor model. Parameters ---------- z : float Redshift value. Notes ----- Model formula: .. math:: f(x) = x (1 + z) """ z = Parameter(description="Redshift", default=0) _has_inverse_bounding_box = True @staticmethod def evaluate(x, z): """One dimensional RedshiftScaleFactor model function.""" return (1 + z) * x @staticmethod def fit_deriv(x, z): """One dimensional RedshiftScaleFactor model derivative.""" d_z = x return [d_z] @property def inverse(self): """Inverse RedshiftScaleFactor model.""" inv = self.copy() inv.z = 1.0 / (1.0 + self.z) - 1.0 try: self.bounding_box except NotImplementedError: pass else: inv.bounding_box = tuple( self.evaluate(x, self.z) for x in self.bounding_box.bounding_box() ) return inv class Sersic1D(Fittable1DModel): r""" One dimensional Sersic surface brightness profile. Parameters ---------- amplitude : float Surface brightness at r_eff. r_eff : float Effective (half-light) radius n : float Sersic Index. See Also -------- Gaussian1D, Moffat1D, Lorentz1D Notes ----- Model formula: .. math:: I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\} The constant :math:`b_n` is defined such that :math:`r_e` contains half the total luminosity, and can be solved for numerically. .. math:: \Gamma(2n) = 2\gamma (b_n,2n) Examples -------- .. 
plot:: :include-source: import numpy as np from astropy.modeling.models import Sersic1D import matplotlib.pyplot as plt plt.figure() plt.subplot(111, xscale='log', yscale='log') s1 = Sersic1D(amplitude=1, r_eff=5) r=np.arange(0, 100, .01) for n in range(1, 10): s1.n = n plt.plot(r, s1(r), color=str(float(n) / 15)) plt.axis([1e-1, 30, 1e-2, 1e3]) plt.xlabel('log Radius') plt.ylabel('log Surface Brightness') plt.text(.25, 1.5, 'n=1') plt.text(.25, 300, 'n=10') plt.xticks([]) plt.yticks([]) plt.show() References ---------- .. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html """ amplitude = Parameter(default=1, description="Surface brightness at r_eff") r_eff = Parameter(default=1, description="Effective (half-light) radius") n = Parameter(default=4, description="Sersic Index") _gammaincinv = None @classmethod def evaluate(cls, r, amplitude, r_eff, n): """One dimensional Sersic profile function.""" if cls._gammaincinv is None: from scipy.special import gammaincinv cls._gammaincinv = gammaincinv return amplitude * np.exp( -cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1) ) @property def input_units(self): if self.r_eff.unit is None: return None return {self.inputs[0]: self.r_eff.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "r_eff": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class _Trigonometric1D(Fittable1DModel): """ Base class for one dimensional trigonometric and inverse trigonometric models. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase """ amplitude = Parameter(default=1, description="Oscillation amplitude") frequency = Parameter(default=1, description="Oscillation frequency") phase = Parameter(default=0, description="Oscillation phase") @property def input_units(self): if self.frequency.unit is None: return None return {self.inputs[0]: 1.0 / self.frequency.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "frequency": inputs_unit[self.inputs[0]] ** -1, "amplitude": outputs_unit[self.outputs[0]], } class Sine1D(_Trigonometric1D): """ One dimensional Sine model. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase See Also -------- ArcSine1D, Cosine1D, Tangent1D, Const1D, Linear1D Notes ----- Model formula: .. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p) Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Sine1D plt.figure() s1 = Sine1D(amplitude=1, frequency=.25) r=np.arange(0, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([0, 10, -5, 5]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional Sine model function.""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). 
argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.sin(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Sine model derivative.""" d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase) d_frequency = ( TWOPI * x * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase) ) d_phase = TWOPI * amplitude * np.cos(TWOPI * frequency * x + TWOPI * phase) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Sine.""" return ArcSine1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) class Cosine1D(_Trigonometric1D): """ One dimensional Cosine model. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase See Also -------- ArcCosine1D, Sine1D, Tangent1D, Const1D, Linear1D Notes ----- Model formula: .. math:: f(x) = A \\cos(2 \\pi f x + 2 \\pi p) Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Cosine1D plt.figure() s1 = Cosine1D(amplitude=1, frequency=.25) r=np.arange(0, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([0, 10, -5, 5]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional Cosine model function.""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.cos(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Cosine model derivative.""" d_amplitude = np.cos(TWOPI * frequency * x + TWOPI * phase) d_frequency = -( TWOPI * x * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase) ) d_phase = -(TWOPI * amplitude * np.sin(TWOPI * frequency * x + TWOPI * phase)) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Cosine.""" return ArcCosine1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) class Tangent1D(_Trigonometric1D): """ One dimensional Tangent model. Parameters ---------- amplitude : float Oscillation amplitude frequency : float Oscillation frequency phase : float Oscillation phase See Also -------- Sine1D, Cosine1D, Const1D, Linear1D Notes ----- Model formula: .. math:: f(x) = A \\tan(2 \\pi f x + 2 \\pi p) Note that the tangent function is undefined for inputs of the form pi/2 + n*pi for all integers n. Thus thus the default bounding box has been restricted to: .. math:: [(-1/4 - p)/f, (1/4 - p)/f] which is the smallest interval for the tangent function to be continuous on. Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Tangent1D plt.figure() s1 = Tangent1D(amplitude=1, frequency=.25) r=np.arange(0, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([0, 10, -5, 5]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional Tangent model function.""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = TWOPI * (frequency * x + phase) if isinstance(argument, Quantity): argument = argument.value return amplitude * np.tan(argument) @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional Tangent model derivative.""" sec = 1 / (np.cos(TWOPI * frequency * x + TWOPI * phase)) ** 2 d_amplitude = np.tan(TWOPI * frequency * x + TWOPI * phase) d_frequency = TWOPI * x * amplitude * sec d_phase = TWOPI * amplitude * sec return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of Tangent.""" return ArcTangent1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. """ bbox = [ (-1 / 4 - self.phase) / self.frequency, (1 / 4 - self.phase) / self.frequency, ] if self.frequency.unit is not None: bbox = bbox / self.frequency.unit return bbox class _InverseTrigonometric1D(_Trigonometric1D): """ Base class for one dimensional inverse trigonometric models. """ @property def input_units(self): if self.amplitude.unit is None: return None return {self.inputs[0]: self.amplitude.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "frequency": outputs_unit[self.outputs[0]] ** -1, "amplitude": inputs_unit[self.inputs[0]], } class ArcSine1D(_InverseTrigonometric1D): """ One dimensional ArcSine model returning values between -pi/2 and pi/2 only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Sine frequency : float Oscillation frequency for corresponding Sine phase : float Oscillation phase for corresponding Sine See Also -------- Sine1D, ArcCosine1D, ArcTangent1D Notes ----- Model formula: .. math:: f(x) = ((arcsin(x / A) / 2pi) - p) / f The arcsin function being used for this model will only accept inputs in [-A, A]; otherwise, a runtime warning will be thrown and the result will be NaN. To avoid this, the bounding_box has been properly set to accommodate this; therefore, it is recommended that this model always be evaluated with the ``with_bounding_box=True`` option. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcSine1D plt.figure() s1 = ArcSine1D(amplitude=1, frequency=.25) r=np.arange(-1, 1, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-1, 1, -np.pi/2, np.pi/2]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcSine model function.""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. 
However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_sine = np.arcsin(argument) / TWOPI return (arc_sine - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcSine model derivative.""" d_amplitude = -x / ( TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2) ) d_frequency = (phase - (np.arcsin(x / amplitude) / TWOPI)) / frequency**2 d_phase = -1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. """ return -1 * self.amplitude, 1 * self.amplitude @property def inverse(self): """One dimensional inverse of ArcSine.""" return Sine1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) class ArcCosine1D(_InverseTrigonometric1D): """ One dimensional ArcCosine returning values between 0 and pi only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Cosine frequency : float Oscillation frequency for corresponding Cosine phase : float Oscillation phase for corresponding Cosine See Also -------- Cosine1D, ArcSine1D, ArcTangent1D Notes ----- Model formula: .. math:: f(x) = ((arccos(x / A) / 2pi) - p) / f The arccos function being used for this model will only accept inputs in [-A, A]; otherwise, a runtime warning will be thrown and the result will be NaN. To avoid this, the bounding_box has been properly set to accommodate this; therefore, it is recommended that this model always be evaluated with the ``with_bounding_box=True`` option. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcCosine1D plt.figure() s1 = ArcCosine1D(amplitude=1, frequency=.25) r=np.arange(-1, 1, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-1, 1, 0, np.pi]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcCosine model function.""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_cos = np.arccos(argument) / TWOPI return (arc_cos - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcCosine model derivative.""" d_amplitude = x / ( TWOPI * frequency * amplitude**2 * np.sqrt(1 - (x / amplitude) ** 2) ) d_frequency = (phase - (np.arccos(x / amplitude) / TWOPI)) / frequency**2 d_phase = -1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. 
""" return -1 * self.amplitude, 1 * self.amplitude @property def inverse(self): """One dimensional inverse of ArcCosine.""" return Cosine1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) class ArcTangent1D(_InverseTrigonometric1D): """ One dimensional ArcTangent model returning values between -pi/2 and pi/2 only. Parameters ---------- amplitude : float Oscillation amplitude for corresponding Tangent frequency : float Oscillation frequency for corresponding Tangent phase : float Oscillation phase for corresponding Tangent See Also -------- Tangent1D, ArcSine1D, ArcCosine1D Notes ----- Model formula: .. math:: f(x) = ((arctan(x / A) / 2pi) - p) / f Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import ArcTangent1D plt.figure() s1 = ArcTangent1D(amplitude=1, frequency=.25) r=np.arange(-10, 10, .01) for amplitude in range(1,4): s1.amplitude = amplitude plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2) plt.axis([-10, 10, -np.pi/2, np.pi/2]) plt.show() """ @staticmethod def evaluate(x, amplitude, frequency, phase): """One dimensional ArcTangent model function.""" # Note: If frequency and x are quantities, they should normally have # inverse units, so that argument ends up being dimensionless. However, # np.sin of a dimensionless quantity will crash, so we remove the # quantity-ness from argument in this case (another option would be to # multiply by * u.rad but this would be slower overall). argument = x / amplitude if isinstance(argument, Quantity): argument = argument.value arc_cos = np.arctan(argument) / TWOPI return (arc_cos - phase) / frequency @staticmethod def fit_deriv(x, amplitude, frequency, phase): """One dimensional ArcTangent model derivative.""" d_amplitude = -x / ( TWOPI * frequency * amplitude**2 * (1 + (x / amplitude) ** 2) ) d_frequency = (phase - (np.arctan(x / amplitude) / TWOPI)) / frequency**2 d_phase = -1 / frequency * np.ones(x.shape) return [d_amplitude, d_frequency, d_phase] @property def inverse(self): """One dimensional inverse of ArcTangent.""" return Tangent1D( amplitude=self.amplitude, frequency=self.frequency, phase=self.phase ) class Linear1D(Fittable1DModel): """ One dimensional Line model. Parameters ---------- slope : float Slope of the straight line intercept : float Intercept of the straight line See Also -------- Const1D Notes ----- Model formula: .. math:: f(x) = a x + b """ slope = Parameter(default=1, description="Slope of the straight line") intercept = Parameter(default=0, description="Intercept of the straight line") linear = True @staticmethod def evaluate(x, slope, intercept): """One dimensional Line model function.""" return slope * x + intercept @staticmethod def fit_deriv(x, *params): """One dimensional Line model derivative with respect to parameters.""" d_slope = x d_intercept = np.ones_like(x) return [d_slope, d_intercept] @property def inverse(self): new_slope = self.slope**-1 new_intercept = -self.intercept / self.slope return self.__class__(slope=new_slope, intercept=new_intercept) @property def input_units(self): if self.intercept.unit is None and self.slope.unit is None: return None return {self.inputs[0]: self.intercept.unit / self.slope.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "intercept": outputs_unit[self.outputs[0]], "slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]], } class Planar2D(Fittable2DModel): """ Two dimensional Plane model. 
Parameters ---------- slope_x : float Slope of the plane in X slope_y : float Slope of the plane in Y intercept : float Z-intercept of the plane Notes ----- Model formula: .. math:: f(x, y) = a x + b y + c """ slope_x = Parameter(default=1, description="Slope of the plane in X") slope_y = Parameter(default=1, description="Slope of the plane in Y") intercept = Parameter(default=0, description="Z-intercept of the plane") linear = True @staticmethod def evaluate(x, y, slope_x, slope_y, intercept): """Two dimensional Plane model function.""" return slope_x * x + slope_y * y + intercept @staticmethod def fit_deriv(x, y, *params): """Two dimensional Plane model derivative with respect to parameters.""" d_slope_x = x d_slope_y = y d_intercept = np.ones_like(x) return [d_slope_x, d_slope_y, d_intercept] def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "intercept": outputs_unit["z"], "slope_x": outputs_unit["z"] / inputs_unit["x"], "slope_y": outputs_unit["z"] / inputs_unit["y"], } class Lorentz1D(Fittable1DModel): """ One dimensional Lorentzian model. Parameters ---------- amplitude : float or `~astropy.units.Quantity`. Peak value - for a normalized profile (integrating to 1), set amplitude = 2 / (np.pi * fwhm) x_0 : float or `~astropy.units.Quantity`. Position of the peak fwhm : float or `~astropy.units.Quantity`. Full width at half maximum (FWHM) See Also -------- Gaussian1D, Box1D, RickerWavelet1D Notes ----- Either all or none of input ``x``, position ``x_0`` and ``fwhm`` must be provided consistently with compatible units or as unitless numbers. Model formula: .. math:: f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}} where :math:`\\gamma` is half of given FWHM. Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Lorentz1D plt.figure() s1 = Lorentz1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Peak value") x_0 = Parameter(default=0, description="Position of the peak") fwhm = Parameter(default=1, description="Full width at half maximum") @staticmethod def evaluate(x, amplitude, x_0, fwhm): """One dimensional Lorentzian model function.""" return amplitude * ((fwhm / 2.0) ** 2) / ((x - x_0) ** 2 + (fwhm / 2.0) ** 2) @staticmethod def fit_deriv(x, amplitude, x_0, fwhm): """One dimensional Lorentzian model derivative with respect to parameters.""" d_amplitude = fwhm**2 / (fwhm**2 + (x - x_0) ** 2) d_x_0 = ( amplitude * d_amplitude * (2 * x - 2 * x_0) / (fwhm**2 + (x - x_0) ** 2) ) d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude) return [d_amplitude, d_x_0, d_fwhm] def bounding_box(self, factor=25): """Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of FWHM used to define the limits. Default is chosen to include most (99%) of the area under the curve, while still showing the central feature of interest. 
""" x0 = self.x_0 dx = factor * self.fwhm return (x0 - dx, x0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "fwhm": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Voigt1D(Fittable1DModel): """ One dimensional model for the Voigt profile. Parameters ---------- x_0 : float or `~astropy.units.Quantity` Position of the peak amplitude_L : float or `~astropy.units.Quantity`. The Lorentzian amplitude (peak of the associated Lorentz function) - for a normalized profile (integrating to 1), set amplitude_L = 2 / (np.pi * fwhm_L) fwhm_L : float or `~astropy.units.Quantity` The Lorentzian full width at half maximum fwhm_G : float or `~astropy.units.Quantity`. The Gaussian full width at half maximum method : str, optional Algorithm for computing the complex error function; one of 'Humlicek2' (default, fast and generally more accurate than ``rtol=3.e-5``) or 'Scipy', alternatively 'wofz' (requires ``scipy``, almost as fast and reference in accuracy). See Also -------- Gaussian1D, Lorentz1D Notes ----- Either all or none of input ``x``, position ``x_0`` and the ``fwhm_*`` must be provided consistently with compatible units or as unitless numbers. Voigt function is calculated as real part of the complex error function computed from either Humlicek's rational approximations (JQSRT 21:309, 1979; 27:437, 1982) following Schreier 2018 (MNRAS 479, 3068; and ``hum2zpf16m`` from his cpfX.py module); or `~scipy.special.wofz` (implementing 'Faddeeva.cc'). Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import Voigt1D import matplotlib.pyplot as plt plt.figure() x = np.arange(0, 10, 0.01) v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9) plt.plot(x, v1(x)) plt.show() """ x_0 = Parameter(default=0, description="Position of the peak") amplitude_L = Parameter(default=1, description="The Lorentzian amplitude") fwhm_L = Parameter( default=2 / np.pi, description="The Lorentzian full width at half maximum" ) fwhm_G = Parameter( default=np.log(2), description="The Gaussian full width at half maximum" ) sqrt_pi = np.sqrt(np.pi) sqrt_ln2 = np.sqrt(np.log(2)) sqrt_ln2pi = np.sqrt(np.log(2) * np.pi) _last_z = np.zeros(1, dtype=complex) _last_w = np.zeros(1, dtype=float) _faddeeva = None def __init__( self, x_0=x_0.default, amplitude_L=amplitude_L.default, fwhm_L=fwhm_L.default, fwhm_G=fwhm_G.default, method=None, **kwargs, ): if str(method).lower() == "humlicek2" and HAS_SCIPY: warnings.warn( f"{method} has been deprecated since Astropy 5.3 and will be removed in a future version.\n" "It is recommended to always use the `~scipy.special.wofz` implementation " "when `scipy` is installed.", AstropyDeprecationWarning, ) if method is None: if HAS_SCIPY: method = "wofz" else: method = "humlicek2" if str(method).lower() in ("wofz", "scipy"): from scipy.special import wofz self._faddeeva = wofz elif str(method).lower() == "humlicek2": self._faddeeva = self._hum2zpf16c else: raise ValueError( f"Not a valid method for Voigt1D Faddeeva function: {method}." ) self.method = self._faddeeva.__name__ super().__init__( x_0=x_0, amplitude_L=amplitude_L, fwhm_L=fwhm_L, fwhm_G=fwhm_G, **kwargs ) def _wrap_wofz(self, z): """Call complex error (Faddeeva) function w(z) implemented by algorithm `method`; cache results for consecutive calls from `evaluate`, `fit_deriv`. 
""" if z.shape == self._last_z.shape and np.allclose( z, self._last_z, rtol=1.0e-14, atol=1.0e-15 ): return self._last_w self._last_z = ( z.to_value(u.dimensionless_unscaled) if isinstance(z, u.Quantity) else z ) self._last_w = self._faddeeva(self._last_z) return self._last_w def evaluate(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): """One dimensional Voigt function scaled to Lorentz peak amplitude.""" z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * self.sqrt_ln2 / fwhm_G # The normalised Voigt profile is w.real * self.sqrt_ln2 / (self.sqrt_pi * fwhm_G) * 2 ; # for the legacy definition we multiply with np.pi * fwhm_L / 2 * amplitude_L return self._wrap_wofz(z).real * self.sqrt_ln2pi / fwhm_G * fwhm_L * amplitude_L def fit_deriv(self, x, x_0, amplitude_L, fwhm_L, fwhm_G): """ Derivative of the one dimensional Voigt function with respect to parameters. """ s = self.sqrt_ln2 / fwhm_G z = np.atleast_1d(2 * (x - x_0) + 1j * fwhm_L) * s # V * constant from McLean implementation (== their Voigt function) w = self._wrap_wofz(z) * s * fwhm_L * amplitude_L * self.sqrt_pi # Schreier (2018) Eq. 6 == (dvdx + 1j * dvdy) / (sqrt(pi) * fwhm_L * amplitude_L) dwdz = -2 * z * w + 2j * s * fwhm_L * amplitude_L return [ -dwdz.real * 2 * s, w.real / amplitude_L, w.real / fwhm_L - dwdz.imag * s, (-w.real - s * (2 * (x - x_0) * dwdz.real - fwhm_L * dwdz.imag)) / fwhm_G, ] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "fwhm_L": inputs_unit[self.inputs[0]], "fwhm_G": inputs_unit[self.inputs[0]], "amplitude_L": outputs_unit[self.outputs[0]], } @staticmethod def _hum2zpf16c(z, s=10.0): """Complex error function w(z = x + iy) combining Humlicek's rational approximations. |x| + y > 10: Humlicek (JQSRT, 1982) rational approximation for region II; else: Humlicek (JQSRT, 1979) rational approximation with n=16 and delta=y0=1.35 Version using a mask and np.place; single complex argument version of Franz Schreier's cpfX.hum2zpf16m. Originally licensed under a 3-clause BSD style license - see https://atmos.eoc.dlr.de/tools/lbl4IR/cpfX.py """ # Optimized (single fraction) Humlicek region I rational approximation for n=16, delta=1.35 # fmt: off AA = np.array( [ +46236.3358828121, -147726.58393079657j, -206562.80451354137, 281369.1590631087j, +183092.74968253175, -184787.96830696272j, -66155.39578477248, 57778.05827983565j, +11682.770904216826, -9442.402767960672j, -1052.8438624933142, 814.0996198624186j, +45.94499030751872, -34.59751573708725j, -0.7616559377907136, 0.5641895835476449j, ] ) # 1j/sqrt(pi) to the 12. 
digit bb = np.array( [ +7918.06640624997, -126689.0625, +295607.8125, -236486.25, +84459.375, -15015.0, +1365.0, -60.0, +1.0, ] ) # fmt: on sqrt_piinv = 1.0 / np.sqrt(np.pi) zz = z * z w = 1j * (z * (zz * sqrt_piinv - 1.410474)) / (0.75 + zz * (zz - 3.0)) if np.any(z.imag < s): mask = abs(z.real) + z.imag < s # returns true for interior points # returns small complex array covering only the interior region Z = z[np.where(mask)] + 1.35j ZZ = Z * Z # fmt: off # Recursive algorithms for the polynomials in Z with coefficients AA, bb # numer = 0.0 # for A in AA[::-1]: # numer = numer * Z + A # Explicitly unrolled above loop for speed numer = (((((((((((((((AA[15]*Z + AA[14])*Z + AA[13])*Z + AA[12])*Z + AA[11])*Z + AA[10])*Z + AA[9])*Z + AA[8])*Z + AA[7])*Z + AA[6])*Z + AA[5])*Z + AA[4])*Z+AA[3])*Z + AA[2])*Z + AA[1])*Z + AA[0]) # denom = 0.0 # for b in bb[::-1]: # denom = denom * ZZ + b # Explicitly unrolled above loop for speed denom = (((((((ZZ + bb[7])*ZZ + bb[6])*ZZ + bb[5])*ZZ+bb[4])*ZZ + bb[3])*ZZ + bb[2])*ZZ + bb[1])*ZZ + bb[0] # fmt: on np.place(w, mask, numer / denom) return w class Const1D(Fittable1DModel): """ One dimensional Constant model. Parameters ---------- amplitude : float Value of the constant function See Also -------- Const2D Notes ----- Model formula: .. math:: f(x) = A Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Const1D plt.figure() s1 = Const1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter( default=1, description="Value of the constant function", mag=True ) linear = True @staticmethod def evaluate(x, amplitude): """One dimensional Constant model function.""" if amplitude.size == 1: # This is slightly faster than using ones_like and multiplying x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype) x.fill(amplitude.item()) else: # This case is less likely but could occur if the amplitude # parameter is given an array-like value x = amplitude * np.ones_like(x, subok=False) if isinstance(amplitude, Quantity): return Quantity(x, unit=amplitude.unit, copy=False, subok=True) return x @staticmethod def fit_deriv(x, amplitude): """One dimensional Constant model derivative with respect to parameters.""" d_amplitude = np.ones_like(x) return [d_amplitude] @property def input_units(self): return None def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"amplitude": outputs_unit[self.outputs[0]]} class Const2D(Fittable2DModel): """ Two dimensional Constant model. Parameters ---------- amplitude : float Value of the constant function See Also -------- Const1D Notes ----- Model formula: .. 
math:: f(x, y) = A """ amplitude = Parameter( default=1, description="Value of the constant function", mag=True ) linear = True @staticmethod def evaluate(x, y, amplitude): """Two dimensional Constant model function.""" if amplitude.size == 1: # This is slightly faster than using ones_like and multiplying x = np.empty_like(amplitude, shape=x.shape, dtype=x.dtype) x.fill(amplitude.item()) else: # This case is less likely but could occur if the amplitude # parameter is given an array-like value x = amplitude * np.ones_like(x, subok=False) if isinstance(amplitude, Quantity): return Quantity(x, unit=amplitude.unit, copy=False, subok=True) return x @property def input_units(self): return None def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"amplitude": outputs_unit[self.outputs[0]]} class Ellipse2D(Fittable2DModel): """ A 2D Ellipse model. Parameters ---------- amplitude : float Value of the ellipse. x_0 : float x position of the center of the disk. y_0 : float y position of the center of the disk. a : float The length of the semimajor axis. b : float The length of the semiminor axis. theta : float or `~astropy.units.Quantity`, optional The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise from the positive x axis. See Also -------- Disk2D, Box2D Notes ----- Model formula: .. math:: f(x, y) = \\left \\{ \\begin{array}{ll} \\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos \\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 + \\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0) \\cos \\theta}{b}\\right]^2 \\leq 1 \\\\ 0 & : \\mathrm{otherwise} \\end{array} \\right. Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import Ellipse2D from astropy.coordinates import Angle import matplotlib.pyplot as plt import matplotlib.patches as mpatches x0, y0 = 25, 25 a, b = 20, 10 theta = Angle(30, 'deg') e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b, theta=theta.radian) y, x = np.mgrid[0:50, 0:50] fig, ax = plt.subplots(1, 1) ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r') e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red', facecolor='none') ax.add_patch(e2) plt.show() """ amplitude = Parameter(default=1, description="Value of the ellipse", mag=True) x_0 = Parameter(default=0, description="X position of the center of the disk.") y_0 = Parameter(default=0, description="Y position of the center of the disk.") a = Parameter(default=1, description="The length of the semimajor axis") b = Parameter(default=1, description="The length of the semiminor axis") theta = Parameter( default=0.0, description=( "Rotation angle either as a float (in radians) or a |Quantity| angle" ), ) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, a, b, theta): """Two dimensional Ellipse model function.""" xx = x - x_0 yy = y - y_0 cost = np.cos(theta) sint = np.sin(theta) numerator1 = (xx * cost) + (yy * sint) numerator2 = -(xx * sint) + (yy * cost) in_ellipse = ((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.0 result = np.select([in_ellipse], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. 
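        The limits are computed with ``ellipse_extent(a, b, theta)``; for
        ``theta = 0`` (an illustrative special case) they reduce to
        ``((y_0 - b, y_0 + b), (x_0 - a, x_0 + a))``, returned in the form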
``((y_low, y_high), (x_low, x_high))`` """ a = self.a b = self.b theta = self.theta dx, dy = ellipse_extent(a, b, theta) return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "a": inputs_unit[self.inputs[0]], "b": inputs_unit[self.inputs[0]], "theta": u.rad, "amplitude": outputs_unit[self.outputs[0]], } class Disk2D(Fittable2DModel): """ Two dimensional radial symmetric Disk model. Parameters ---------- amplitude : float Value of the disk function x_0 : float x position center of the disk y_0 : float y position center of the disk R_0 : float Radius of the disk See Also -------- Box2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(r) = \\left \\{ \\begin{array}{ll} A & : r \\leq R_0 \\\\ 0 & : r > R_0 \\end{array} \\right. """ amplitude = Parameter(default=1, description="Value of disk function", mag=True) x_0 = Parameter(default=0, description="X position of center of the disk") y_0 = Parameter(default=0, description="Y position of center of the disk") R_0 = Parameter(default=1, description="Radius of the disk") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, R_0): """Two dimensional Disk model function.""" rr = (x - x_0) ** 2 + (y - y_0) ** 2 result = np.select([rr <= R_0**2], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``((y_low, y_high), (x_low, x_high))`` """ return ( (self.y_0 - self.R_0, self.y_0 + self.R_0), (self.x_0 - self.R_0, self.x_0 + self.R_0), ) @property def input_units(self): if self.x_0.unit is None and self.y_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "R_0": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Ring2D(Fittable2DModel): """ Two dimensional radial symmetric Ring model. Parameters ---------- amplitude : float Value of the disk function x_0 : float x position center of the disk y_0 : float y position center of the disk r_in : float Inner radius of the ring width : float Width of the ring. r_out : float Outer Radius of the ring. Can be specified instead of width. See Also -------- Disk2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(r) = \\left \\{ \\begin{array}{ll} A & : r_{in} \\leq r \\leq r_{out} \\\\ 0 & : \\text{else} \\end{array} \\right. Where :math:`r_{out} = r_{in} + r_{width}`. 
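    A short construction sketch (parameter values are illustrative): the ring
    may be specified either through ``r_in``/``width`` or through ``r_out``::

        from astropy.modeling.models import Ring2D

        r1 = Ring2D(amplitude=1, x_0=0, y_0=0, r_in=2, width=1)
        r2 = Ring2D(amplitude=1, x_0=0, y_0=0, r_in=2, r_out=3)
        # both evaluate to ``amplitude`` for 2 <= r <= 3 and to 0 elsewhere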
""" amplitude = Parameter(default=1, description="Value of the disk function", mag=True) x_0 = Parameter(default=0, description="X position of center of disc") y_0 = Parameter(default=0, description="Y position of center of disc") r_in = Parameter(default=1, description="Inner radius of the ring") width = Parameter(default=1, description="Width of the ring") def __init__( self, amplitude=amplitude.default, x_0=x_0.default, y_0=y_0.default, r_in=None, width=None, r_out=None, **kwargs, ): if (r_in is None) and (r_out is None) and (width is None): r_in = self.r_in.default width = self.width.default elif (r_in is not None) and (r_out is None) and (width is None): width = self.width.default elif (r_in is None) and (r_out is not None) and (width is None): r_in = self.r_in.default width = r_out - r_in elif (r_in is None) and (r_out is None) and (width is not None): r_in = self.r_in.default elif (r_in is not None) and (r_out is not None) and (width is None): width = r_out - r_in elif (r_in is None) and (r_out is not None) and (width is not None): r_in = r_out - width elif (r_in is not None) and (r_out is not None) and (width is not None): if np.any(width != (r_out - r_in)): raise InputParameterError("Width must be r_out - r_in") if np.any(r_in < 0) or np.any(width < 0): raise InputParameterError(f"{r_in=} and {width=} must both be >=0") super().__init__( amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, **kwargs ) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, r_in, width): """Two dimensional Ring model function.""" rr = (x - x_0) ** 2 + (y - y_0) ** 2 r_range = np.logical_and(rr >= r_in**2, rr <= (r_in + width) ** 2) result = np.select([r_range], [amplitude]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dr = self.r_in + self.width return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "r_in": inputs_unit[self.inputs[0]], "width": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Box1D(Fittable1DModel): """ One dimensional Box model. Parameters ---------- amplitude : float Amplitude A x_0 : float Position of the center of the box function width : float Width of the box See Also -------- Box2D, TrapezoidDisk2D Notes ----- Model formula: .. math:: f(x) = \\left \\{ \\begin{array}{ll} A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\ 0 & : \\text{else} \\end{array} \\right. Examples -------- .. 
plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Box1D plt.figure() s1 = Box1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude A", mag=True) x_0 = Parameter(default=0, description="Position of center of box function") width = Parameter(default=1, description="Width of the box") @staticmethod def evaluate(x, amplitude, x_0, width): """One dimensional Box model function.""" inside = np.logical_and(x >= x_0 - width / 2.0, x <= x_0 + width / 2.0) return np.select([inside], [amplitude], 0) @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``(x_low, x_high))`` """ dx = self.width / 2 return (self.x_0 - dx, self.x_0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} @property def return_units(self): if self.amplitude.unit is None: return None return {self.outputs[0]: self.amplitude.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "width": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Box2D(Fittable2DModel): """ Two dimensional Box model. Parameters ---------- amplitude : float Amplitude x_0 : float x position of the center of the box function x_width : float Width in x direction of the box y_0 : float y position of the center of the box function y_width : float Width in y direction of the box See Also -------- Box1D, Gaussian2D, Moffat2D Notes ----- Model formula: .. math:: f(x, y) = \\left \\{ \\begin{array}{ll} A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\ & y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\ 0 : & \\text{else} \\end{array} \\right. """ amplitude = Parameter(default=1, description="Amplitude", mag=True) x_0 = Parameter( default=0, description="X position of the center of the box function" ) y_0 = Parameter( default=0, description="Y position of the center of the box function" ) x_width = Parameter(default=1, description="Width in x direction of the box") y_width = Parameter(default=1, description="Width in y direction of the box") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width): """Two dimensional Box model function.""" x_range = np.logical_and(x >= x_0 - x_width / 2.0, x <= x_0 + x_width / 2.0) y_range = np.logical_and(y >= y_0 - y_width / 2.0, y <= y_0 + y_width / 2.0) result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dx = self.x_width / 2 dy = self.y_width / 2 return ((self.y_0 - dy, self.y_0 + dy), (self.x_0 - dx, self.x_0 + dx)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[1]], "x_width": inputs_unit[self.inputs[0]], "y_width": inputs_unit[self.inputs[1]], "amplitude": outputs_unit[self.outputs[0]], } class Trapezoid1D(Fittable1DModel): """ One dimensional Trapezoid model. 
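    The profile is flat at ``amplitude`` over ``width`` centred on ``x_0`` and
    falls off linearly with ``slope`` on both sides; with the defaults
    (``amplitude = 1``, ``x_0 = 0``, ``width = 1``, ``slope = 1``), for example,
    the model equals 1 on ``[-0.5, 0.5]`` and drops to 0 at ``x = -1.5`` and
    ``x = 1.5``.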
Parameters ---------- amplitude : float Amplitude of the trapezoid x_0 : float Center position of the trapezoid width : float Width of the constant part of the trapezoid. slope : float Slope of the tails of the trapezoid See Also -------- Box1D, Gaussian1D, Moffat1D Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Trapezoid1D plt.figure() s1 = Trapezoid1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude of the trapezoid") x_0 = Parameter(default=0, description="Center position of the trapezoid") width = Parameter(default=1, description="Width of constant part of the trapezoid") slope = Parameter(default=1, description="Slope of the tails of trapezoid") @staticmethod def evaluate(x, amplitude, x_0, width, slope): """One dimensional Trapezoid model function.""" # Compute the four points where the trapezoid changes slope # x1 <= x2 <= x3 <= x4 x2 = x_0 - width / 2.0 x3 = x_0 + width / 2.0 x1 = x2 - amplitude / slope x4 = x3 + amplitude / slope # Compute model values in pieces between the change points range_a = np.logical_and(x >= x1, x < x2) range_b = np.logical_and(x >= x2, x < x3) range_c = np.logical_and(x >= x3, x < x4) val_a = slope * (x - x1) val_b = amplitude val_c = slope * (x4 - x) result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. ``(x_low, x_high))`` """ dx = self.width / 2 + self.amplitude / self.slope return (self.x_0 - dx, self.x_0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "width": inputs_unit[self.inputs[0]], "slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class TrapezoidDisk2D(Fittable2DModel): """ Two dimensional circular Trapezoid model. Parameters ---------- amplitude : float Amplitude of the trapezoid x_0 : float x position of the center of the trapezoid y_0 : float y position of the center of the trapezoid R_0 : float Radius of the constant part of the trapezoid. slope : float Slope of the tails of the trapezoid in x direction. 
See Also -------- Disk2D, Box2D """ amplitude = Parameter(default=1, description="Amplitude of the trapezoid") x_0 = Parameter(default=0, description="X position of the center of the trapezoid") y_0 = Parameter(default=0, description="Y position of the center of the trapezoid") R_0 = Parameter(default=1, description="Radius of constant part of trapezoid") slope = Parameter( default=1, description="Slope of tails of trapezoid in x direction" ) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, R_0, slope): """Two dimensional Trapezoid Disk model function.""" r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) range_1 = r <= R_0 range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope) val_1 = amplitude val_2 = amplitude + slope * (R_0 - r) result = np.select([range_1, range_2], [val_1, val_2]) if isinstance(amplitude, Quantity): return Quantity(result, unit=amplitude.unit, copy=False, subok=True) return result @property def bounding_box(self): """ Tuple defining the default ``bounding_box``. ``((y_low, y_high), (x_low, x_high))`` """ dr = self.R_0 + self.amplitude / self.slope return ((self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr)) @property def input_units(self): if self.x_0.unit is None and self.y_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit["x"] != inputs_unit["y"]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "R_0": inputs_unit[self.inputs[0]], "slope": outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class RickerWavelet1D(Fittable1DModel): """ One dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat" model). .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this model. Parameters ---------- amplitude : float Amplitude x_0 : float Position of the peak sigma : float Width of the Ricker wavelet See Also -------- RickerWavelet2D, Box1D, Gaussian1D, Trapezoid1D Notes ----- Model formula: .. math:: f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right) e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}} Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import RickerWavelet1D plt.figure() s1 = RickerWavelet1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -2, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude (peak) value") x_0 = Parameter(default=0, description="Position of the peak") sigma = Parameter(default=1, description="Width of the Ricker wavelet") @staticmethod def evaluate(x, amplitude, x_0, sigma): """One dimensional Ricker Wavelet model function.""" xx_ww = (x - x_0) ** 2 / (2 * sigma**2) return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww) def bounding_box(self, factor=10.0): """Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of sigma used to define the limits. 
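        A small sketch with the default parameters (``x_0 = 0``, ``sigma = 1``)::

            from astropy.modeling.models import RickerWavelet1D

            m = RickerWavelet1D()
            m.bounding_box(factor=10.0)  # (x_0 - 10 * sigma, x_0 + 10 * sigma) -> (-10, 10)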
""" x0 = self.x_0 dx = factor * self.sigma return (x0 - dx, x0 + dx) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "sigma": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class RickerWavelet2D(Fittable2DModel): """ Two dimensional Ricker Wavelet model (sometimes known as a "Mexican Hat" model). .. note:: See https://github.com/astropy/astropy/pull/9445 for discussions related to renaming of this model. Parameters ---------- amplitude : float Amplitude x_0 : float x position of the peak y_0 : float y position of the peak sigma : float Width of the Ricker wavelet See Also -------- RickerWavelet1D, Gaussian2D Notes ----- Model formula: .. math:: f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2} + \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right) e^{\\frac{- \\left(x - x_{0}\\right)^{2} - \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}} """ amplitude = Parameter(default=1, description="Amplitude (peak) value") x_0 = Parameter(default=0, description="X position of the peak") y_0 = Parameter(default=0, description="Y position of the peak") sigma = Parameter(default=1, description="Width of the Ricker wavelet") @staticmethod def evaluate(x, y, amplitude, x_0, y_0, sigma): """Two dimensional Ricker Wavelet model function.""" rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma**2) return amplitude * (1 - rr_ww) * np.exp(-rr_ww) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "sigma": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class AiryDisk2D(Fittable2DModel): """ Two dimensional Airy disk model. Parameters ---------- amplitude : float Amplitude of the Airy function. x_0 : float x position of the maximum of the Airy function. y_0 : float y position of the maximum of the Airy function. radius : float The radius of the Airy disk (radius of the first zero). See Also -------- Box2D, TrapezoidDisk2D, Gaussian2D Notes ----- Model formula: .. math:: f(r) = A \\left[ \\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}} \\right]^2 Where :math:`J_1` is the first order Bessel function of the first kind, :math:`r` is radial distance from the maximum of the Airy function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R` is the input ``radius`` parameter, and :math:`R_z = 1.2196698912665045`). For an optical system, the radius of the first zero represents the limiting angular resolution and is approximately 1.22 * lambda / D, where lambda is the wavelength of the light and D is the diameter of the aperture. See [1]_ for more details about the Airy disk. References ---------- .. 
[1] https://en.wikipedia.org/wiki/Airy_disk """ amplitude = Parameter( default=1, description="Amplitude (peak value) of the Airy function" ) x_0 = Parameter(default=0, description="X position of the peak") y_0 = Parameter(default=0, description="Y position of the peak") radius = Parameter( default=1, description="The radius of the Airy disk (radius of first zero crossing)", ) _rz = None _j1 = None @classmethod def evaluate(cls, x, y, amplitude, x_0, y_0, radius): """Two dimensional Airy model function.""" if cls._rz is None: from scipy.special import j1, jn_zeros cls._rz = jn_zeros(1, 1)[0] / np.pi cls._j1 = j1 r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz) if isinstance(r, Quantity): # scipy function cannot handle Quantity, so turn into array. r = r.to_value(u.dimensionless_unscaled) # Since r can be zero, we have to take care to treat that case # separately so as not to raise a numpy warning z = np.ones(r.shape) rt = np.pi * r[r > 0] z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2 if isinstance(amplitude, Quantity): # make z quantity too, otherwise in-place multiplication fails. z = Quantity(z, u.dimensionless_unscaled, copy=False, subok=True) z *= amplitude return z @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "radius": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Moffat1D(Fittable1DModel): """ One dimensional Moffat model. Parameters ---------- amplitude : float Amplitude of the model. x_0 : float x position of the maximum of the Moffat model. gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. See Also -------- Gaussian1D, Box1D Notes ----- Model formula: .. math:: f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha} Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Moffat1D plt.figure() s1 = Moffat1D() r = np.arange(-5, 5, .01) for factor in range(1, 4): s1.amplitude = factor s1.width = factor plt.plot(r, s1(r), color=str(0.25 * factor), lw=2) plt.axis([-5, 5, -1, 4]) plt.show() """ amplitude = Parameter(default=1, description="Amplitude of the model") x_0 = Parameter(default=0, description="X position of maximum of Moffat model") gamma = Parameter(default=1, description="Core width of Moffat model") alpha = Parameter(default=1, description="Power index of the Moffat model") @property def fwhm(self): """ Moffat full width at half maximum. Derivation of the formula is available in `this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_. 
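        As a quick sanity check (illustrative values): for ``gamma = 1`` and
        ``alpha = 1`` the formula gives ``2 * sqrt(2**(1/1) - 1) = 2``::

            from astropy.modeling.models import Moffat1D

            Moffat1D(amplitude=1, x_0=0, gamma=1, alpha=1).fwhm   # 2.0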
""" return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0) @staticmethod def evaluate(x, amplitude, x_0, gamma, alpha): """One dimensional Moffat model function.""" return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha) @staticmethod def fit_deriv(x, amplitude, x_0, gamma, alpha): """One dimensional Moffat model derivative with respect to parameters.""" fac = 1 + (x - x_0) ** 2 / gamma**2 d_A = fac ** (-alpha) d_x_0 = 2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma**2) d_gamma = 2 * amplitude * alpha * (x - x_0) ** 2 * d_A / (fac * gamma**3) d_alpha = -amplitude * d_A * np.log(fac) return [d_A, d_x_0, d_gamma, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "gamma": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Moffat2D(Fittable2DModel): """ Two dimensional Moffat model. Parameters ---------- amplitude : float Amplitude of the model. x_0 : float x position of the maximum of the Moffat model. y_0 : float y position of the maximum of the Moffat model. gamma : float Core width of the Moffat model. alpha : float Power index of the Moffat model. See Also -------- Gaussian2D, Box2D Notes ----- Model formula: .. math:: f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} + \\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha} """ amplitude = Parameter(default=1, description="Amplitude (peak value) of the model") x_0 = Parameter( default=0, description="X position of the maximum of the Moffat model" ) y_0 = Parameter( default=0, description="Y position of the maximum of the Moffat model" ) gamma = Parameter(default=1, description="Core width of the Moffat model") alpha = Parameter(default=1, description="Power index of the Moffat model") @property def fwhm(self): """ Moffat full width at half maximum. Derivation of the formula is available in `this notebook by Yoonsoo Bach <https://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_. """ return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0) @staticmethod def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha): """Two dimensional Moffat model function.""" rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2 return amplitude * (1 + rr_gg) ** (-alpha) @staticmethod def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha): """Two dimensional Moffat model derivative with respect to parameters.""" rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma**2 d_A = (1 + rr_gg) ** (-alpha) d_x_0 = 2 * amplitude * alpha * d_A * (x - x_0) / (gamma**2 * (1 + rr_gg)) d_y_0 = 2 * amplitude * alpha * d_A * (y - y_0) / (gamma**2 * (1 + rr_gg)) d_alpha = -amplitude * d_A * np.log(1 + rr_gg) d_gamma = 2 * amplitude * alpha * d_A * rr_gg / (gamma * (1 + rr_gg)) return [d_A, d_x_0, d_y_0, d_gamma, d_alpha] @property def input_units(self): if self.x_0.unit is None: return None else: return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. 
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "gamma": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } class Sersic2D(Fittable2DModel): r""" Two dimensional Sersic surface brightness profile. Parameters ---------- amplitude : float Surface brightness at r_eff. r_eff : float Effective (half-light) radius n : float Sersic Index. x_0 : float, optional x position of the center. y_0 : float, optional y position of the center. ellip : float, optional Ellipticity. theta : float or `~astropy.units.Quantity`, optional The rotation angle as an angular quantity (`~astropy.units.Quantity` or `~astropy.coordinates.Angle`) or a value in radians (as a float). The rotation angle increases counterclockwise from the positive x axis. See Also -------- Gaussian2D, Moffat2D Notes ----- Model formula: .. math:: I(x,y) = I(r) = I_e\exp\left\{ -b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right] \right\} The constant :math:`b_n` is defined such that :math:`r_e` contains half the total luminosity, and can be solved for numerically. .. math:: \Gamma(2n) = 2\gamma (2n,b_n) Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import Sersic2D import matplotlib.pyplot as plt x,y = np.meshgrid(np.arange(100), np.arange(100)) mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50, ellip=.5, theta=-1) img = mod(x, y) log_img = np.log10(img) plt.figure() plt.imshow(log_img, origin='lower', interpolation='nearest', vmin=-1, vmax=2) plt.xlabel('x') plt.ylabel('y') cbar = plt.colorbar() cbar.set_label('Log Brightness', rotation=270, labelpad=25) cbar.set_ticks([-1, 0, 1, 2]) plt.show() References ---------- .. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html """ amplitude = Parameter(default=1, description="Surface brightness at r_eff") r_eff = Parameter(default=1, description="Effective (half-light) radius") n = Parameter(default=4, description="Sersic Index") x_0 = Parameter(default=0, description="X position of the center") y_0 = Parameter(default=0, description="Y position of the center") ellip = Parameter(default=0, description="Ellipticity") theta = Parameter( default=0.0, description=( "Rotation angle either as a float (in radians) or a |Quantity| angle" ), ) _gammaincinv = None @classmethod def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta): """Two dimensional Sersic profile function.""" if cls._gammaincinv is None: from scipy.special import gammaincinv cls._gammaincinv = gammaincinv bn = cls._gammaincinv(2.0 * n, 0.5) a, b = r_eff, (1 - ellip) * r_eff cos_theta, sin_theta = np.cos(theta), np.sin(theta) x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2) return amplitude * np.exp(-bn * (z ** (1 / n) - 1)) @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit, self.inputs[1]: self.y_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): # Note that here we need to make sure that x and y are in the same # units otherwise this can lead to issues since rotation is not well # defined. 
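        # ``n`` and ``ellip`` are dimensionless and ``theta`` is always mapped
        # to radians below, so only ``x_0``/``y_0``/``r_eff`` pick up the
        # spatial input unit and ``amplitude`` the surface-brightness unit.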
if inputs_unit[self.inputs[0]] != inputs_unit[self.inputs[1]]: raise UnitsError("Units of 'x' and 'y' inputs should match") return { "x_0": inputs_unit[self.inputs[0]], "y_0": inputs_unit[self.inputs[0]], "r_eff": inputs_unit[self.inputs[0]], "theta": u.rad, "amplitude": outputs_unit[self.outputs[0]], } class KingProjectedAnalytic1D(Fittable1DModel): """ Projected (surface density) analytic King Model. Parameters ---------- amplitude : float Amplitude or scaling factor. r_core : float Core radius (f(r_c) ~ 0.5 f_0) r_tide : float Tidal radius. Notes ----- This model approximates a King model with an analytic function. The derivation of this equation can be found in King '62 (equation 14). This is just an approximation of the full model and the parameters derived from this model should be taken with caution. It usually works for models with a concentration (c = log10(r_t/r_c) parameter < 2. Model formula: .. math:: f(x) = A r_c^2 \\left(\\frac{1}{\\sqrt{(x^2 + r_c^2)}} - \\frac{1}{\\sqrt{(r_t^2 + r_c^2)}}\\right)^2 Examples -------- .. plot:: :include-source: import numpy as np from astropy.modeling.models import KingProjectedAnalytic1D import matplotlib.pyplot as plt plt.figure() rt_list = [1, 2, 5, 10, 20] for rt in rt_list: r = np.linspace(0.1, rt, 100) mod = KingProjectedAnalytic1D(amplitude = 1, r_core = 1., r_tide = rt) sig = mod(r) plt.loglog(r, sig/sig[0], label=f"c ~ {mod.concentration:0.2f}") plt.xlabel("r") plt.ylabel(r"$\\sigma/\\sigma_0$") plt.legend() plt.show() References ---------- .. [1] https://ui.adsabs.harvard.edu/abs/1962AJ.....67..471K """ amplitude = Parameter( default=1, bounds=(FLOAT_EPSILON, None), description="Amplitude or scaling factor", ) r_core = Parameter( default=1, bounds=(FLOAT_EPSILON, None), description="Core Radius" ) r_tide = Parameter( default=2, bounds=(FLOAT_EPSILON, None), description="Tidal Radius" ) @property def concentration(self): """Concentration parameter of the king model.""" return np.log10(np.abs(self.r_tide / self.r_core)) @staticmethod def _core_func(x, r_core, r_tide, power=1): return ( 1.0 / np.sqrt(x**2 + r_core**2) ** power - 1.0 / np.sqrt(r_tide**2 + r_core**2) ** power ) @staticmethod def _filter(x, r_tide, result): """Set invalid r values to 0""" bounds = (x >= r_tide) | (x < 0) result[bounds] = result[bounds] * 0.0 def evaluate(self, x, amplitude, r_core, r_tide): """ Analytic King model function. """ result = amplitude * r_core**2 * self._core_func(x, r_core, r_tide) ** 2 self._filter(x, r_tide, result) return result def fit_deriv(self, x, amplitude, r_core, r_tide): """ Analytic King model function derivatives. """ d_amplitude = r_core**2 * self._core_func(x, r_core, r_tide) ** 2 self._filter(x, r_tide, d_amplitude) d_r_core = ( -2.0 * amplitude * r_core**3 * self._core_func(x, r_core, r_tide, power=3) * self._core_func(x, r_core, r_tide) + 2 * amplitude * r_core * self._core_func(x, r_core, r_tide) ** 2 ) self._filter(x, r_tide, d_r_core) d_r_tide = ( 2 * amplitude * r_core**2 * r_tide * self._core_func(x, r_core, r_tide) ) / (r_core**2 + r_tide**2) ** (3 / 2) self._filter(x, r_tide, d_r_tide) return [d_amplitude, d_r_core, d_r_tide] @property def bounding_box(self): """ Tuple defining the default ``bounding_box`` limits. The model is not defined for r > r_tide. 
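        With the default ``r_tide = 2``, for example, this evaluates to
        ``(0, 2)``, returned in the form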
        ``(r_low, r_high)``
        """
        return (0 * self.r_tide, 1 * self.r_tide)

    @property
    def input_units(self):
        if self.r_core.unit is None:
            return None
        return {self.inputs[0]: self.r_core.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        return {
            "r_core": inputs_unit[self.inputs[0]],
            "r_tide": inputs_unit[self.inputs[0]],
            "amplitude": outputs_unit[self.outputs[0]],
        }


class Logarithmic1D(Fittable1DModel):
    """
    One dimensional logarithmic model.

    Parameters
    ----------
    amplitude : float, optional
    tau : float, optional

    See Also
    --------
    Exponential1D, Gaussian1D
    """

    amplitude = Parameter(default=1)
    tau = Parameter(default=1)

    @staticmethod
    def evaluate(x, amplitude, tau):
        return amplitude * np.log(x / tau)

    @staticmethod
    def fit_deriv(x, amplitude, tau):
        d_amplitude = np.log(x / tau)
        d_tau = np.zeros(x.shape) - (amplitude / tau)
        return [d_amplitude, d_tau]

    @property
    def inverse(self):
        new_amplitude = self.tau
        new_tau = self.amplitude
        return Exponential1D(amplitude=new_amplitude, tau=new_tau)

    @tau.validator
    def tau(self, val):
        if np.all(val == 0):
            raise ValueError("0 is not an allowed value for tau")

    @property
    def input_units(self):
        if self.tau.unit is None:
            return None
        return {self.inputs[0]: self.tau.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        return {
            "tau": inputs_unit[self.inputs[0]],
            "amplitude": outputs_unit[self.outputs[0]],
        }


class Exponential1D(Fittable1DModel):
    """
    One dimensional exponential model.

    Parameters
    ----------
    amplitude : float, optional
    tau : float, optional

    See Also
    --------
    Logarithmic1D, Gaussian1D
    """

    amplitude = Parameter(default=1)
    tau = Parameter(default=1)

    @staticmethod
    def evaluate(x, amplitude, tau):
        return amplitude * np.exp(x / tau)

    @staticmethod
    def fit_deriv(x, amplitude, tau):
        """Derivative with respect to parameters."""
        d_amplitude = np.exp(x / tau)
        d_tau = -amplitude * (x / tau**2) * np.exp(x / tau)
        return [d_amplitude, d_tau]

    @property
    def inverse(self):
        new_amplitude = self.tau
        new_tau = self.amplitude
        return Logarithmic1D(amplitude=new_amplitude, tau=new_tau)

    @tau.validator
    def tau(self, val):
        """tau cannot be 0."""
        if np.all(val == 0):
            raise ValueError("0 is not an allowed value for tau")

    @property
    def input_units(self):
        if self.tau.unit is None:
            return None
        return {self.inputs[0]: self.tau.unit}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        return {
            "tau": inputs_unit[self.inputs[0]],
            "amplitude": outputs_unit[self.outputs[0]],
        }
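

# A minimal usage sketch (not part of the library itself): Exponential1D and
# Logarithmic1D are registered as each other's ``inverse``, so composing a
# model with its inverse recovers the input up to floating-point error::
#
#     from astropy.modeling.models import Exponential1D
#
#     m = Exponential1D(amplitude=2.0, tau=3.0)
#     m.inverse(m(1.5))   # ~1.5; m.inverse is Logarithmic1D(amplitude=3, tau=2)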
0afc396a0f6634996cf82511ee3e58fb7bd779e8ca7f2131fb4b3b62a56aa98d
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This module contains models representing polynomials and polynomial series. """ # pylint: disable=invalid-name from math import comb import numpy as np from astropy.utils import check_broadcast, indent from .core import FittableModel, Model from .functional_models import Shift from .parameters import Parameter from .utils import _validate_domain_window, poly_map_domain __all__ = [ "Chebyshev1D", "Chebyshev2D", "Hermite1D", "Hermite2D", "InverseSIP", "Legendre1D", "Legendre2D", "Polynomial1D", "Polynomial2D", "SIP", "OrthoPolynomialBase", "PolynomialModel", ] class PolynomialBase(FittableModel): """ Base class for all polynomial-like models with an arbitrary number of parameters in the form of coefficients. In this case Parameter instances are returned through the class's ``__getattr__`` rather than through class descriptors. """ # Default _param_names list; this will be filled in by the implementation's # __init__ _param_names = () linear = True col_fit_deriv = False @property def param_names(self): """Coefficient names generated based on the model's polynomial degree and number of dimensions. Subclasses should implement this to return parameter names in the desired format. On most `Model` classes this is a class attribute, but for polynomial models it is an instance attribute since each polynomial model instance can have different parameters depending on the degree of the polynomial and the number of dimensions, for example. """ return self._param_names class PolynomialModel(PolynomialBase): """ Base class for polynomial models. Its main purpose is to determine how many coefficients are needed based on the polynomial order and dimension and to provide their default values, names and ordering. """ def __init__( self, degree, n_models=None, model_set_axis=None, name=None, meta=None, **params ): self._degree = degree self._order = self.get_num_coeff(self.n_inputs) self._param_names = self._generate_coeff_names(self.n_inputs) if n_models: if model_set_axis is None: model_set_axis = 0 minshape = (1,) * model_set_axis + (n_models,) else: minshape = () for param_name in self._param_names: self._parameters_[param_name] = Parameter( param_name, default=np.zeros(minshape) ) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) @property def degree(self): """Degree of polynomial.""" return self._degree def get_num_coeff(self, ndim): """ Return the number of coefficients in one parameter set. """ if self.degree < 0: raise ValueError("Degree of polynomial must be positive or null") # deg+1 is used to account for the difference between iraf using # degree and numpy using exact degree if ndim != 1: nmixed = comb(self.degree, ndim) else: nmixed = 0 numc = self.degree * ndim + nmixed + 1 return numc def _invlex(self): c = [] lencoeff = self.degree + 1 for i in range(lencoeff): for j in range(lencoeff): if i + j <= self.degree: c.append((j, i)) return c[::-1] def _generate_coeff_names(self, ndim): names = [] if ndim == 1: for n in range(self._order): names.append(f"c{n}") else: for i in range(self.degree + 1): names.append(f"c{i}_{0}") for i in range(1, self.degree + 1): names.append(f"c{0}_{i}") for i in range(1, self.degree): for j in range(1, self.degree): if i + j < self.degree + 1: names.append(f"c{i}_{j}") return tuple(names) class _PolyDomainWindow1D(PolynomialModel): """ This class sets ``domain`` and ``window`` of 1D polynomials. 
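    The domain-to-window mapping is linear; a small sketch with illustrative
    values, using one of the 1D subclasses::

        from astropy.modeling.models import Chebyshev1D

        p = Chebyshev1D(2, domain=(0, 10))   # window defaults to (-1, 1)
        # before the series is evaluated, x = 0, 5, 10 are rescaled by
        # poly_map_domain to -1, 0, +1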
""" def __init__( self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, n_models, model_set_axis, name=name, meta=meta, **params ) self._set_default_domain_window(domain, window) @property def window(self): return self._window @window.setter def window(self, val): self._window = _validate_domain_window(val) @property def domain(self): return self._domain @domain.setter def domain(self, val): self._domain = _validate_domain_window(val) def _set_default_domain_window(self, domain, window): """ This method sets the ``domain`` and ``window`` attributes on 1D subclasses. """ self._default_domain_window = {"domain": None, "window": (-1, 1)} self.window = window or (-1, 1) self.domain = domain def __repr__(self): return self._format_repr( [self.degree], kwargs={"domain": self.domain, "window": self.window}, defaults=self._default_domain_window, ) def __str__(self): return self._format_str( [("Degree", self.degree), ("Domain", self.domain), ("Window", self.window)], self._default_domain_window, ) class OrthoPolynomialBase(PolynomialBase): """ This is a base class for the 2D Chebyshev and Legendre models. The polynomials implemented here require a maximum degree in x and y. For explanation of ``x_domain``, ``y_domain``, ```x_window`` and ```y_window`` see :ref:`Notes regarding usage of domain and window <astropy:domain-window-note>`. Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable x_window : tuple or None, optional range of the x independent variable y_domain : tuple or None, optional domain of the y independent variable y_window : tuple or None, optional range of the y independent variable **params : dict {keyword: value} pairs, representing {parameter_name: value} """ n_inputs = 2 n_outputs = 1 def __init__( self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): self.x_degree = x_degree self.y_degree = y_degree self._order = self.get_num_coeff() # Set the ``x/y_domain`` and ``x/y_wndow`` attributes in subclasses. 
self._default_domain_window = { "x_window": (-1, 1), "y_window": (-1, 1), "x_domain": None, "y_domain": None, } self.x_window = x_window or self._default_domain_window["x_window"] self.y_window = y_window or self._default_domain_window["y_window"] self.x_domain = x_domain self.y_domain = y_domain self._param_names = self._generate_coeff_names() if n_models: if model_set_axis is None: model_set_axis = 0 minshape = (1,) * model_set_axis + (n_models,) else: minshape = () for param_name in self._param_names: self._parameters_[param_name] = Parameter( param_name, default=np.zeros(minshape) ) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) @property def x_domain(self): return self._x_domain @x_domain.setter def x_domain(self, val): self._x_domain = _validate_domain_window(val) @property def y_domain(self): return self._y_domain @y_domain.setter def y_domain(self, val): self._y_domain = _validate_domain_window(val) @property def x_window(self): return self._x_window @x_window.setter def x_window(self, val): self._x_window = _validate_domain_window(val) @property def y_window(self): return self._y_window @y_window.setter def y_window(self, val): self._y_window = _validate_domain_window(val) def __repr__(self): return self._format_repr( [self.x_degree, self.y_degree], kwargs={ "x_domain": self.x_domain, "y_domain": self.y_domain, "x_window": self.x_window, "y_window": self.y_window, }, defaults=self._default_domain_window, ) def __str__(self): return self._format_str( [ ("X_Degree", self.x_degree), ("Y_Degree", self.y_degree), ("X_Domain", self.x_domain), ("Y_Domain", self.y_domain), ("X_Window", self.x_window), ("Y_Window", self.y_window), ], self._default_domain_window, ) def get_num_coeff(self): """ Determine how many coefficients are needed. 
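        The count is ``(x_degree + 1) * (y_degree + 1)``; for example,
        ``x_degree = 2`` and ``y_degree = 3`` give 12 coefficients.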
Returns ------- numc : int number of coefficients """ if self.x_degree < 0 or self.y_degree < 0: raise ValueError("Degree of polynomial must be positive or null") return (self.x_degree + 1) * (self.y_degree + 1) def _invlex(self): # TODO: This is a very slow way to do this; fix it and related methods # like _alpha c = [] xvar = np.arange(self.x_degree + 1) yvar = np.arange(self.y_degree + 1) for j in yvar: for i in xvar: c.append((i, j)) return np.array(c[::-1]) def invlex_coeff(self, coeffs): invlex_coeffs = [] xvar = np.arange(self.x_degree + 1) yvar = np.arange(self.y_degree + 1) for j in yvar: for i in xvar: name = f"c{i}_{j}" coeff = coeffs[self.param_names.index(name)] invlex_coeffs.append(coeff) return np.array(invlex_coeffs[::-1]) def _alpha(self): invlexdeg = self._invlex() invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1 nx = self.x_degree + 1 ny = self.y_degree + 1 alpha = np.zeros((ny * nx + 3, ny + nx)) for n in range(len(invlexdeg)): alpha[n][invlexdeg[n]] = [1, 1] alpha[-2, 0] = 1 alpha[-3, nx] = 1 return alpha def imhorner(self, x, y, coeff): _coeff = list(coeff) _coeff.extend([0, 0, 0]) alpha = self._alpha() r0 = _coeff[0] nalpha = len(alpha) karr = np.diff(alpha, axis=0) kfunc = self._fcache(x, y) x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 nterms = x_terms + y_terms for n in range(1, nterms + 1 + 3): setattr(self, "r" + str(n), 0.0) for n in range(1, nalpha): k = karr[n - 1].nonzero()[0].max() + 1 rsum = 0 for i in range(1, k + 1): rsum = rsum + getattr(self, "r" + str(i)) val = kfunc[k - 1] * (r0 + rsum) setattr(self, "r" + str(k), val) r0 = _coeff[n] for i in range(1, k): setattr(self, "r" + str(i), 0.0) result = r0 for i in range(1, nterms + 1 + 3): result = result + getattr(self, "r" + str(i)) return result def _generate_coeff_names(self): names = [] for j in range(self.y_degree + 1): for i in range(self.x_degree + 1): names.append(f"c{i}_{j}") return tuple(names) def _fcache(self, x, y): """ Computation and store the individual functions. To be implemented by subclasses" """ raise NotImplementedError("Subclasses should implement this") def evaluate(self, x, y, *coeffs): if self.x_domain is not None: x = poly_map_domain(x, self.x_domain, self.x_window) if self.y_domain is not None: y = poly_map_domain(y, self.y_domain, self.y_window) invcoeff = self.invlex_coeff(coeffs) return self.imhorner(x, y, invcoeff) def prepare_inputs(self, x, y, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs) x, y = inputs if x.shape != y.shape: raise ValueError("Expected input arrays to have the same shape") return (x, y), broadcasted_shapes class Chebyshev1D(_PolyDomainWindow1D): r""" Univariate Chebyshev series. It is defined as: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x) where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind. For explanation of ```domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int degree of the series domain : tuple or None, optional window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window. **params : dict keyword : value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Chebyshev polynomials is a polynomial in x - since the coefficients within each Chebyshev polynomial are fixed, we can't use quantities for x since the units would not be compatible. 
For example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with units, 2x^2 and -1 would have incompatible units. """ n_inputs = 1 n_outputs = 1 _separable = True def __init__( self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, domain=domain, window=window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ x = np.array(x, dtype=float, copy=False, ndmin=1) v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype) v[0] = 1 if self.degree > 0: x2 = 2 * x v[1] = x for i in range(2, self.degree + 1): v[i] = v[i - 1] * x2 - v[i - 2] return np.rollaxis(v, 0, v.ndim) def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.clenshaw(x, coeffs) @staticmethod def clenshaw(x, coeffs): """Evaluates the polynomial using Clenshaw's algorithm.""" if len(coeffs) == 1: c0 = coeffs[0] c1 = 0 elif len(coeffs) == 2: c0 = coeffs[0] c1 = coeffs[1] else: x2 = 2 * x c0 = coeffs[-2] c1 = coeffs[-1] for i in range(3, len(coeffs) + 1): tmp = c0 c0 = coeffs[-i] - c1 c1 = tmp + c1 * x2 return c0 + c1 * x class Hermite1D(_PolyDomainWindow1D): r""" Univariate Hermite series. It is defined as: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x) where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind"). For explanation of ``domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int degree of the series domain : tuple or None, optional window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword : value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Hermite polynomials is a polynomial in x - since the coefficients within each Hermite polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units, 4x^2 and -2 would have incompatible units. """ n_inputs = 1 n_outputs = 1 _separable = True def __init__( self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, domain, window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. 
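        Each column holds a (physicists') Hermite polynomial evaluated at ``x``
        via the recurrence ``H_i(x) = 2*x*H_{i-1}(x) - 2*(i - 1)*H_{i-2}(x)``;
        for an illustrative call with ``degree = 2`` and ``len(x) == 5`` the
        returned array has shape ``(5, 3)``.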
Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ x = np.array(x, dtype=float, copy=False, ndmin=1) v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype) v[0] = 1 if self.degree > 0: x2 = 2 * x v[1] = 2 * x for i in range(2, self.degree + 1): v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2] return np.rollaxis(v, 0, v.ndim) def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.clenshaw(x, coeffs) @staticmethod def clenshaw(x, coeffs): x2 = x * 2 if len(coeffs) == 1: c0 = coeffs[0] c1 = 0 elif len(coeffs) == 2: c0 = coeffs[0] c1 = coeffs[1] else: nd = len(coeffs) c0 = coeffs[-2] c1 = coeffs[-1] for i in range(3, len(coeffs) + 1): temp = c0 nd = nd - 1 c0 = coeffs[-i] - c1 * (2 * (nd - 1)) c1 = temp + c1 * x2 return c0 + c1 * x2 class Hermite2D(OrthoPolynomialBase): r""" Bivariate Hermite series. It is defined as .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y) where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials. For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Hermite polynomials is a polynomial in x and/or y - since the coefficients within each Hermite polynomial are fixed, we can't use quantities for x and/or y since the units would not be compatible. For example, the third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units, 4x^2 and -2 would have incompatible units. """ _separable = False def __init__( self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def _fcache(self, x, y): """ Calculate the individual Hermite functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = 2 * x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = 2 * y.copy() for n in range(2, x_terms): kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2] for n in range(x_terms + 2, x_terms + y_terms): kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2] return kfunc def fit_deriv(self, x, y, *params): """ Derivatives with respect to the coefficients. This is an array with Hermite polynomials: .. 
math:: H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m} Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._hermderiv1d(x, self.x_degree + 1).T y_deriv = self._hermderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _hermderiv1d(self, x, deg): """ Derivative of 1D Hermite series. """ x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1, len(x)), dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: x2 = 2 * x d[1] = x2 for i in range(2, deg + 1): d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2] return np.rollaxis(d, 0, d.ndim) class Legendre1D(_PolyDomainWindow1D): r""" Univariate Legendre series. It is defined as: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x) where ``L_i(x)`` is the corresponding Legendre polynomial. For explanation of ``domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int degree of the series domain : tuple or None, optional window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Legendre polynomials is a polynomial in x - since the coefficients within each Legendre polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with units, 1.5x^2 and -0.5 would have incompatible units. """ n_inputs = 1 n_outputs = 1 _separable = True def __init__( self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, domain, window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.clenshaw(x, coeffs) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ x = np.array(x, dtype=float, copy=False, ndmin=1) v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype) v[0] = 1 if self.degree > 0: v[1] = x for i in range(2, self.degree + 1): v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i return np.rollaxis(v, 0, v.ndim) @staticmethod def clenshaw(x, coeffs): if len(coeffs) == 1: c0 = coeffs[0] c1 = 0 elif len(coeffs) == 2: c0 = coeffs[0] c1 = coeffs[1] else: nd = len(coeffs) c0 = coeffs[-2] c1 = coeffs[-1] for i in range(3, len(coeffs) + 1): tmp = c0 nd = nd - 1 c0 = coeffs[-i] - (c1 * (nd - 1)) / nd c1 = tmp + (c1 * x * (2 * nd - 1)) / nd return c0 + c1 * x class Polynomial1D(_PolyDomainWindow1D): r""" 1D Polynomial model. It is defined as: .. 
math:: P = \sum_{i=0}^{i=n}C_{i} * x^{i} For explanation of ``domain``, and ``window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int degree of the series domain : tuple or None, optional If None, it is set to (-1, 1) window : tuple or None, optional If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value """ n_inputs = 1 n_outputs = 1 _separable = True def __init__( self, degree, domain=None, window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, domain, window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) # Set domain separately because it's different from # the orthogonal polynomials. self._default_domain_window = { "domain": (-1, 1), "window": (-1, 1), } self.domain = domain or self._default_domain_window["domain"] self.window = window or self._default_domain_window["window"] def prepare_inputs(self, x, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs) x = inputs[0] return (x,), broadcasted_shapes def evaluate(self, x, *coeffs): if self.domain is not None: x = poly_map_domain(x, self.domain, self.window) return self.horner(x, coeffs) def fit_deriv(self, x, *params): """ Computes the Vandermonde matrix. Parameters ---------- x : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ v = np.empty((self.degree + 1,) + x.shape, dtype=float) v[0] = 1 if self.degree > 0: v[1] = x for i in range(2, self.degree + 1): v[i] = v[i - 1] * x return np.rollaxis(v, 0, v.ndim) @staticmethod def horner(x, coeffs): if len(coeffs) == 1: c0 = coeffs[-1] * np.ones_like(x, subok=False) else: c0 = coeffs[-1] for i in range(2, len(coeffs) + 1): c0 = coeffs[-i] + c0 * x return c0 @property def input_units(self): if self.degree == 0 or self.c1.unit is None: return None else: return {self.inputs[0]: self.c0.unit / self.c1.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): mapping = {} for i in range(self.degree + 1): par = getattr(self, f"c{i}") mapping[par.name] = ( outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i ) return mapping class Polynomial2D(PolynomialModel): r""" 2D Polynomial model. Represents a general polynomial of degree n: .. math:: P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n + c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ... + c_{(n-1)1}x^{n-1}y For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. Parameters ---------- degree : int Polynomial degree: largest sum of exponents (:math:`i + j`) of variables in each monomial term of the form :math:`x^i y^j`. The number of terms in a 2D polynomial of degree ``n`` is given by binomial coefficient :math:`C(n + 2, 2) = (n + 2)! / (2!\,n!) = (n + 1)(n + 2) / 2`. 
x_domain : tuple or None, optional domain of the x independent variable If None, it is set to (-1, 1) y_domain : tuple or None, optional domain of the y independent variable If None, it is set to (-1, 1) x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the x_domain to x_window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the y_domain to y_window **params : dict keyword: value pairs, representing parameter_name: value """ n_inputs = 2 n_outputs = 1 _separable = False def __init__( self, degree, x_domain=None, y_domain=None, x_window=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( degree, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) self._default_domain_window = { "x_domain": (-1, 1), "y_domain": (-1, 1), "x_window": (-1, 1), "y_window": (-1, 1), } self.x_domain = x_domain or self._default_domain_window["x_domain"] self.y_domain = y_domain or self._default_domain_window["y_domain"] self.x_window = x_window or self._default_domain_window["x_window"] self.y_window = y_window or self._default_domain_window["y_window"] def prepare_inputs(self, x, y, **kwargs): inputs, broadcasted_shapes = super().prepare_inputs(x, y, **kwargs) x, y = inputs return (x, y), broadcasted_shapes def evaluate(self, x, y, *coeffs): if self.x_domain is not None: x = poly_map_domain(x, self.x_domain, self.x_window) if self.y_domain is not None: y = poly_map_domain(y, self.y_domain, self.y_window) invcoeff = self.invlex_coeff(coeffs) result = self.multivariate_horner(x, y, invcoeff) # Special case for degree==0 to ensure that the shape of the output is # still as expected by the broadcasting rules, even though the x and y # inputs are not used in the evaluation if self.degree == 0: output_shape = check_broadcast(np.shape(coeffs[0]), x.shape) if output_shape: new_result = np.empty(output_shape) new_result[:] = result result = new_result return result def __repr__(self): return self._format_repr( [self.degree], kwargs={ "x_domain": self.x_domain, "y_domain": self.y_domain, "x_window": self.x_window, "y_window": self.y_window, }, defaults=self._default_domain_window, ) def __str__(self): return self._format_str( [ ("Degree", self.degree), ("X_Domain", self.x_domain), ("Y_Domain", self.y_domain), ("X_Window", self.x_window), ("Y_Window", self.y_window), ], self._default_domain_window, ) def fit_deriv(self, x, y, *params): """ Computes the Vandermonde matrix. 
Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.ndim == 2: x = x.flatten() if y.ndim == 2: y = y.flatten() if x.size != y.size: raise ValueError("Expected x and y to be of equal size") designx = x[:, None] ** np.arange(self.degree + 1) designy = y[:, None] ** np.arange(1, self.degree + 1) designmixed = [] for i in range(1, self.degree): for j in range(1, self.degree): if i + j <= self.degree: designmixed.append((x**i) * (y**j)) designmixed = np.array(designmixed).T if designmixed.any(): v = np.hstack([designx, designy, designmixed]) else: v = np.hstack([designx, designy]) return v def invlex_coeff(self, coeffs): invlex_coeffs = [] lencoeff = range(self.degree + 1) for i in lencoeff: for j in lencoeff: if i + j <= self.degree: name = f"c{j}_{i}" coeff = coeffs[self.param_names.index(name)] invlex_coeffs.append(coeff) return invlex_coeffs[::-1] def multivariate_horner(self, x, y, coeffs): """ Multivariate Horner's scheme. Parameters ---------- x, y : array coeffs : array Coefficients in inverse lexical order. """ alpha = self._invlex() r0 = coeffs[0] r1 = r0 * 0.0 r2 = r0 * 0.0 karr = np.diff(alpha, axis=0) for n in range(len(karr)): if karr[n, 1] != 0: r2 = y * (r0 + r1 + r2) r1 = np.zeros_like(coeffs[0], subok=False) else: r1 = x * (r0 + r1) r0 = coeffs[n + 1] return r0 + r1 + r2 @property def input_units(self): if self.degree == 0 or (self.c1_0.unit is None and self.c0_1.unit is None): return None return { self.inputs[0]: self.c0_0.unit / self.c1_0.unit, self.inputs[1]: self.c0_0.unit / self.c0_1.unit, } def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): mapping = {} for i in range(self.degree + 1): for j in range(self.degree + 1): if i + j > 2: continue par = getattr(self, f"c{i}_{j}") mapping[par.name] = ( outputs_unit[self.outputs[0]] / inputs_unit[self.inputs[0]] ** i / inputs_unit[self.inputs[1]] ** j ) return mapping @property def x_domain(self): return self._x_domain @x_domain.setter def x_domain(self, val): self._x_domain = _validate_domain_window(val) @property def y_domain(self): return self._y_domain @y_domain.setter def y_domain(self, val): self._y_domain = _validate_domain_window(val) @property def x_window(self): return self._x_window @x_window.setter def x_window(self, val): self._x_window = _validate_domain_window(val) @property def y_window(self): return self._y_window @y_window.setter def y_window(self, val): self._y_window = _validate_domain_window(val) class Chebyshev2D(OrthoPolynomialBase): r""" Bivariate Chebyshev series.. It is defined as .. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} T_n(x ) T_m(y) where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the first kind. For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. 
Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- This model does not support the use of units/quantities, because each term in the sum of Chebyshev polynomials is a polynomial in x and/or y - since the coefficients within each Chebyshev polynomial are fixed, we can't use quantities for x and/or y since the units would not be compatible. For example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with units, 2x^2 and -1 would have incompatible units. """ _separable = False def __init__( self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def _fcache(self, x, y): """ Calculate the individual Chebyshev functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = y.copy() for n in range(2, x_terms): kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2] for n in range(x_terms + 2, x_terms + y_terms): kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2] return kfunc def fit_deriv(self, x, y, *params): """ Derivatives with respect to the coefficients. This is an array with Chebyshev polynomials: .. math:: T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m} Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._chebderiv1d(x, self.x_degree + 1).T y_deriv = self._chebderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _chebderiv1d(self, x, deg): """ Derivative of 1D Chebyshev series. """ x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1, len(x)), dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: x2 = 2 * x d[1] = x for i in range(2, deg + 1): d[i] = d[i - 1] * x2 - d[i - 2] return np.rollaxis(d, 0, d.ndim) class Legendre2D(OrthoPolynomialBase): r""" Bivariate Legendre series. Defined as: .. math:: P_{n_m}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} L_n(x ) L_m(y) where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials. For explanation of ``x_domain``, ``y_domain``, ``x_window`` and ``y_window`` see :ref:`Notes regarding usage of domain and window <domain-window-note>`. 
Parameters ---------- x_degree : int degree in x y_degree : int degree in y x_domain : tuple or None, optional domain of the x independent variable y_domain : tuple or None, optional domain of the y independent variable x_window : tuple or None, optional range of the x independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window y_window : tuple or None, optional range of the y independent variable If None, it is set to (-1, 1) Fitters will remap the domain to this window **params : dict keyword: value pairs, representing parameter_name: value Notes ----- Model formula: .. math:: P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x) where ``L_{i}`` is the corresponding Legendre polynomial. This model does not support the use of units/quantities, because each term in the sum of Legendre polynomials is a polynomial in x - since the coefficients within each Legendre polynomial are fixed, we can't use quantities for x since the units would not be compatible. For example, the third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with units, 1.5x^2 and -0.5 would have incompatible units. """ _separable = False def __init__( self, x_degree, y_degree, x_domain=None, x_window=None, y_domain=None, y_window=None, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): super().__init__( x_degree, y_degree, x_domain=x_domain, y_domain=y_domain, x_window=x_window, y_window=y_window, n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def _fcache(self, x, y): """ Calculate the individual Legendre functions once and store them in a dictionary to be reused. """ x_terms = self.x_degree + 1 y_terms = self.y_degree + 1 kfunc = {} kfunc[0] = np.ones(x.shape) kfunc[1] = x.copy() kfunc[x_terms] = np.ones(y.shape) kfunc[x_terms + 1] = y.copy() for n in range(2, x_terms): kfunc[n] = ( (2 * (n - 1) + 1) * x * kfunc[n - 1] - (n - 1) * kfunc[n - 2] ) / n for n in range(2, y_terms): kfunc[n + x_terms] = ( (2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1] - (n - 1) * kfunc[n + x_terms - 2] ) / (n) return kfunc def fit_deriv(self, x, y, *params): """Derivatives with respect to the coefficients. This is an array with Legendre polynomials: Lx0Ly0 Lx1Ly0...LxnLy0...LxnLym Parameters ---------- x : ndarray input y : ndarray input *params throw-away parameter list returned by non-linear fitters Returns ------- result : ndarray The Vandermonde matrix """ if x.shape != y.shape: raise ValueError("x and y must have the same shape") x = x.flatten() y = y.flatten() x_deriv = self._legendderiv1d(x, self.x_degree + 1).T y_deriv = self._legendderiv1d(y, self.y_degree + 1).T ij = [] for i in range(self.y_degree + 1): for j in range(self.x_degree + 1): ij.append(x_deriv[j] * y_deriv[i]) v = np.array(ij) return v.T def _legendderiv1d(self, x, deg): """Derivative of 1D Legendre polynomial.""" x = np.array(x, dtype=float, copy=False, ndmin=1) d = np.empty((deg + 1,) + x.shape, dtype=x.dtype) d[0] = x * 0 + 1 if deg > 0: d[1] = x for i in range(2, deg + 1): d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i return np.rollaxis(d, 0, d.ndim) class _SIP1D(PolynomialBase): """ This implements the Simple Imaging Polynomial Model (SIP) in 1D. It's unlikely it will be used in 1D so this class is private and SIP should be used instead. 
""" n_inputs = 2 n_outputs = 1 _separable = False def __init__( self, order, coeff_prefix, n_models=None, model_set_axis=None, name=None, meta=None, **params, ): self.order = order self.coeff_prefix = coeff_prefix self._param_names = self._generate_coeff_names(coeff_prefix) if n_models: if model_set_axis is None: model_set_axis = 0 minshape = (1,) * model_set_axis + (n_models,) else: minshape = () for param_name in self._param_names: self._parameters_[param_name] = Parameter( param_name, default=np.zeros(minshape) ) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta, **params, ) def __repr__(self): return self._format_repr(args=[self.order, self.coeff_prefix]) def __str__(self): return self._format_str( [("Order", self.order), ("Coeff. Prefix", self.coeff_prefix)] ) def evaluate(self, x, y, *coeffs): # TODO: Rewrite this so that it uses a simpler method of determining # the matrix based on the number of given coefficients. mcoef = self._coeff_matrix(self.coeff_prefix, coeffs) return self._eval_sip(x, y, mcoef) def get_num_coeff(self, ndim): """ Return the number of coefficients in one param set. """ if self.order < 2 or self.order > 9: raise ValueError("Degree of polynomial must be 2< deg < 9") nmixed = comb(self.order, ndim) # remove 3 terms because SIP deg >= 2 numc = self.order * ndim + nmixed - 2 return numc def _generate_coeff_names(self, coeff_prefix): names = [] for i in range(2, self.order + 1): names.append(f"{coeff_prefix}_{i}_{0}") for i in range(2, self.order + 1): names.append(f"{coeff_prefix}_{0}_{i}") for i in range(1, self.order): for j in range(1, self.order): if i + j < self.order + 1: names.append(f"{coeff_prefix}_{i}_{j}") return tuple(names) def _coeff_matrix(self, coeff_prefix, coeffs): mat = np.zeros((self.order + 1, self.order + 1)) for i in range(2, self.order + 1): attr = f"{coeff_prefix}_{i}_{0}" mat[i, 0] = coeffs[self.param_names.index(attr)] for i in range(2, self.order + 1): attr = f"{coeff_prefix}_{0}_{i}" mat[0, i] = coeffs[self.param_names.index(attr)] for i in range(1, self.order): for j in range(1, self.order): if i + j < self.order + 1: attr = f"{coeff_prefix}_{i}_{j}" mat[i, j] = coeffs[self.param_names.index(attr)] return mat def _eval_sip(self, x, y, coef): x = np.asarray(x, dtype=np.float64) y = np.asarray(y, dtype=np.float64) if self.coeff_prefix == "A": result = np.zeros(x.shape) else: result = np.zeros(y.shape) for i in range(coef.shape[0]): for j in range(coef.shape[1]): if 1 < i + j < self.order + 1: result = result + coef[i, j] * x**i * y**j return result class SIP(Model): """ Simple Imaging Polynomial (SIP) model. The SIP convention is used to represent distortions in FITS image headers. See [1]_ for a description of the SIP convention. Parameters ---------- crpix : list or (2,) ndarray CRPIX values a_order : int SIP polynomial order for first axis b_order : int SIP order for second axis a_coeff : dict SIP coefficients for first axis b_coeff : dict SIP coefficients for the second axis ap_order : int order for the inverse transformation (AP coefficients) bp_order : int order for the inverse transformation (BP coefficients) ap_coeff : dict coefficients for the inverse transform bp_coeff : dict coefficients for the inverse transform References ---------- .. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 
347, 2005 <https://ui.adsabs.harvard.edu/abs/2005ASPC..347..491S>`_ """ n_inputs = 2 n_outputs = 2 _separable = False def __init__( self, crpix, a_order, b_order, a_coeff={}, b_coeff={}, ap_order=None, bp_order=None, ap_coeff={}, bp_coeff={}, n_models=None, model_set_axis=None, name=None, meta=None, ): self._crpix = crpix self._a_order = a_order self._b_order = b_order self._a_coeff = a_coeff self._b_coeff = b_coeff self._ap_order = ap_order self._bp_order = bp_order self._ap_coeff = ap_coeff self._bp_coeff = bp_coeff self.shift_a = Shift(-crpix[0]) self.shift_b = Shift(-crpix[1]) self.sip1d_a = _SIP1D( a_order, coeff_prefix="A", n_models=n_models, model_set_axis=model_set_axis, **a_coeff, ) self.sip1d_b = _SIP1D( b_order, coeff_prefix="B", n_models=n_models, model_set_axis=model_set_axis, **b_coeff, ) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta ) self._inputs = ("u", "v") self._outputs = ("x", "y") def __repr__(self): return ( f"<{self.__class__.__name__}" f"({[self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]!r})>" ) def __str__(self): parts = [f"Model: {self.__class__.__name__}"] for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]: parts.append(indent(str(model), width=4)) parts.append("") return "\n".join(parts) @property def inverse(self): if self._ap_order is not None and self._bp_order is not None: return InverseSIP( self._ap_order, self._bp_order, self._ap_coeff, self._bp_coeff ) else: raise NotImplementedError("SIP inverse coefficients are not available.") def evaluate(self, x, y): u = self.shift_a.evaluate(x, *self.shift_a.param_sets) v = self.shift_b.evaluate(y, *self.shift_b.param_sets) f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets) g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets) return f, g class InverseSIP(Model): """ Inverse Simple Imaging Polynomial. Parameters ---------- ap_order : int order for the inverse transformation (AP coefficients) bp_order : int order for the inverse transformation (BP coefficients) ap_coeff : dict coefficients for the inverse transform bp_coeff : dict coefficients for the inverse transform """ n_inputs = 2 n_outputs = 2 _separable = False def __init__( self, ap_order, bp_order, ap_coeff={}, bp_coeff={}, n_models=None, model_set_axis=None, name=None, meta=None, ): self._ap_order = ap_order self._bp_order = bp_order self._ap_coeff = ap_coeff self._bp_coeff = bp_coeff # define the 0th term in order to use Polynomial2D ap_coeff.setdefault("AP_0_0", 0) bp_coeff.setdefault("BP_0_0", 0) ap_coeff_params = {k.replace("AP_", "c"): v for k, v in ap_coeff.items()} bp_coeff_params = {k.replace("BP_", "c"): v for k, v in bp_coeff.items()} self.sip1d_ap = Polynomial2D( degree=ap_order, model_set_axis=model_set_axis, **ap_coeff_params ) self.sip1d_bp = Polynomial2D( degree=bp_order, model_set_axis=model_set_axis, **bp_coeff_params ) super().__init__( n_models=n_models, model_set_axis=model_set_axis, name=name, meta=meta ) def __repr__(self): return f"<{self.__class__.__name__}({[self.sip1d_ap, self.sip1d_bp]!r})>" def __str__(self): parts = [f"Model: {self.__class__.__name__}"] for model in [self.sip1d_ap, self.sip1d_bp]: parts.append(indent(str(model), width=4)) parts.append("") return "\n".join(parts) def evaluate(self, x, y): x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets) y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets) return x1, y1
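# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition for clarity, not part of the module
# above): a quick, hedged check that the Clenshaw and Horner evaluators
# defined in the 1D polynomial models agree with NumPy's reference
# implementations.  It assumes astropy is importable and uses only the public
# aliases exported from astropy.modeling.models.
if __name__ == "__main__":
    import numpy as np
    from numpy.polynomial import chebyshev as np_cheb
    from astropy.modeling import models

    x = np.linspace(-1.0, 1.0, 5)

    # Chebyshev series 1*T0 + 2*T1 + 3*T2, evaluated via Clenshaw's algorithm
    # with the default domain/window of (-1, 1).
    cheb = models.Chebyshev1D(2, c0=1.0, c1=2.0, c2=3.0)
    assert np.allclose(cheb(x), np_cheb.chebval(x, [1.0, 2.0, 3.0]))

    # Power series 1 + 2*x + 3*x**2, evaluated via Horner's scheme.
    poly = models.Polynomial1D(2, c0=1.0, c1=2.0, c2=3.0)
    assert np.allclose(poly(x), 1.0 + 2.0 * x + 3.0 * x**2)

    print("1D polynomial evaluators agree with NumPy reference values")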
88658d4ca3706535da59e92fd7d3c715eed3a79f4b0d7713ab765ebdb6e49e00
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Models that have physical origins. """ # pylint: disable=invalid-name, no-member import warnings import numpy as np from astropy import constants as const from astropy import units as u from astropy.utils.exceptions import AstropyUserWarning from .core import Fittable1DModel from .parameters import InputParameterError, Parameter __all__ = ["BlackBody", "Drude1D", "Plummer1D", "NFW"] class BlackBody(Fittable1DModel): """ Blackbody model using the Planck function. Parameters ---------- temperature : `~astropy.units.Quantity` ['temperature'] Blackbody temperature. scale : float or `~astropy.units.Quantity` ['dimensionless'] Scale factor. If dimensionless, input units will assumed to be in Hz and output units in (erg / (cm ** 2 * s * Hz * sr). If not dimensionless, must be equivalent to either (erg / (cm ** 2 * s * Hz * sr) or erg / (cm ** 2 * s * AA * sr), in which case the result will be returned in the requested units and the scale will be stripped of units (with the float value applied). Notes ----- Model formula: .. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1} Examples -------- >>> from astropy.modeling import models >>> from astropy import units as u >>> bb = models.BlackBody(temperature=5000*u.K) >>> bb(6000 * u.AA) # doctest: +FLOAT_CMP <Quantity 1.53254685e-05 erg / (Hz s sr cm2)> .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import BlackBody from astropy import units as u from astropy.visualization import quantity_support bb = BlackBody(temperature=5778*u.K) wav = np.arange(1000, 110000) * u.AA flux = bb(wav) with quantity_support(): plt.figure() plt.semilogx(wav, flux) plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--') plt.show() """ # We parametrize this model with a temperature and a scale. temperature = Parameter( default=5000.0, min=0, unit=u.K, description="Blackbody temperature" ) scale = Parameter(default=1.0, min=0, description="Scale factor") # We allow values without units to be passed when evaluating the model, and # in this case the input x values are assumed to be frequencies in Hz or wavelengths # in AA (depending on the choice of output units controlled by units on scale # and stored in self._output_units during init). _input_units_allow_dimensionless = True # We enable the spectral equivalency by default for the spectral axis input_units_equivalencies = {"x": u.spectral()} # Store the native units returned by B_nu equation _native_units = u.erg / (u.cm**2 * u.s * u.Hz * u.sr) # Store the base native output units. If scale is not dimensionless, it # must be equivalent to one of these. If equivalent to SLAM, then # input_units will expect AA for 'x', otherwise Hz. _native_output_units = { "SNU": u.erg / (u.cm**2 * u.s * u.Hz * u.sr), "SLAM": u.erg / (u.cm**2 * u.s * u.AA * u.sr), } def __init__(self, *args, **kwargs): scale = kwargs.get("scale", None) # Support scale with non-dimensionless unit by stripping the unit and # storing as self._output_units. 
if hasattr(scale, "unit") and not scale.unit.is_equivalent( u.dimensionless_unscaled ): output_units = scale.unit if not output_units.is_equivalent( self._native_units, u.spectral_density(1 * u.AA) ): raise ValueError( "scale units not dimensionless or in " f"surface brightness: {output_units}" ) kwargs["scale"] = scale.value self._output_units = output_units else: self._output_units = self._native_units return super().__init__(*args, **kwargs) def evaluate(self, x, temperature, scale): """Evaluate the model. Parameters ---------- x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency'] Frequency at which to compute the blackbody. If no units are given, this defaults to Hz (or AA if `scale` was initialized with units equivalent to erg / (cm ** 2 * s * AA * sr)). temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity` Temperature of the blackbody. If no units are given, this defaults to Kelvin. scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless'] Desired scale for the blackbody. Returns ------- y : number or ndarray Blackbody spectrum. The units are determined from the units of ``scale``. .. note:: Use `numpy.errstate` to suppress Numpy warnings, if desired. .. warning:: Output values might contain ``nan`` and ``inf``. Raises ------ ValueError Invalid temperature. ZeroDivisionError Wavelength is zero (when converting to frequency). """ if not isinstance(temperature, u.Quantity): in_temp = u.Quantity(temperature, u.K) else: in_temp = temperature if not isinstance(x, u.Quantity): # then we assume it has input_units which depends on the # requested output units (either Hz or AA) in_x = u.Quantity(x, self.input_units["x"]) else: in_x = x # Convert to units for calculations, also force double precision with u.add_enabled_equivalencies(u.spectral() + u.temperature()): freq = u.Quantity(in_x, u.Hz, dtype=np.float64) temp = u.Quantity(in_temp, u.K) # Check if input values are physically possible if np.any(temp < 0): raise ValueError(f"Temperature should be positive: {temp}") if not np.all(np.isfinite(freq)) or np.any(freq <= 0): warnings.warn( "Input contains invalid wavelength/frequency value(s)", AstropyUserWarning, ) log_boltz = const.h * freq / (const.k_B * temp) boltzm1 = np.expm1(log_boltz) # Calculate blackbody flux bb_nu = 2.0 * const.h * freq**3 / (const.c**2 * boltzm1) / u.sr if self.scale.unit is not None: # Will be dimensionless at this point, but may not be dimensionless_unscaled if not hasattr(scale, "unit"): # during fitting, scale will be passed without units # but we still need to convert from the input dimensionless # to dimensionless unscaled scale = scale * self.scale.unit scale = scale.to(u.dimensionless_unscaled).value # NOTE: scale is already stripped of any input units y = scale * bb_nu.to(self._output_units, u.spectral_density(freq)) # If the temperature parameter has no unit, we should return a unitless # value. This occurs for instance during fitting, since we drop the # units temporarily. if hasattr(temperature, "unit"): return y return y.value @property def input_units(self): # The input units are those of the 'x' value, which will depend on the # units compatible with the expected output units. 
if self._output_units.is_equivalent(self._native_output_units["SNU"]): return {self.inputs[0]: u.Hz} else: # only other option is equivalent with SLAM return {self.inputs[0]: u.AA} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"temperature": u.K} @property def bolometric_flux(self): """Bolometric flux.""" if self.scale.unit is not None: # Will be dimensionless at this point, but may not be dimensionless_unscaled scale = self.scale.quantity.to(u.dimensionless_unscaled) else: scale = self.scale.value # bolometric flux in the native units of the planck function native_bolflux = scale * const.sigma_sb * self.temperature**4 / np.pi # return in more "astro" units return native_bolflux.to(u.erg / (u.cm**2 * u.s)) @property def lambda_max(self): """Peak wavelength when the curve is expressed as power density.""" return const.b_wien / self.temperature @property def nu_max(self): """Peak frequency when the curve is expressed as power density.""" return 2.8214391 * const.k_B * self.temperature / const.h class Drude1D(Fittable1DModel): """ Drude model based one the behavior of electons in materials (esp. metals). Parameters ---------- amplitude : float Peak value x_0 : float Position of the peak fwhm : float Full width at half maximum Model formula: .. math:: f(x) = A \\frac{(fwhm/x_0)^2}{((x/x_0 - x_0/x)^2 + (fwhm/x_0)^2} Examples -------- .. plot:: :include-source: import numpy as np import matplotlib.pyplot as plt from astropy.modeling.models import Drude1D fig, ax = plt.subplots() # generate the curves and plot them x = np.arange(7.5 , 12.5 , 0.1) dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0) ax.plot(x, dmodel(x)) ax.set_xlabel('x') ax.set_ylabel('F(x)') plt.show() """ amplitude = Parameter(default=1.0, description="Peak Value") x_0 = Parameter(default=1.0, description="Position of the peak") fwhm = Parameter(default=1.0, description="Full width at half maximum") @staticmethod def evaluate(x, amplitude, x_0, fwhm): """ One dimensional Drude model function. """ return ( amplitude * ((fwhm / x_0) ** 2) / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2) ) @staticmethod def fit_deriv(x, amplitude, x_0, fwhm): """ Drude1D model function derivatives. """ d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2) d_x_0 = ( -2 * amplitude * d_amplitude * ( (1 / x_0) + d_amplitude * (x_0**2 / fwhm**2) * ( (-x / x_0 - 1 / x) * (x / x_0 - x_0 / x) - (2 * fwhm**2 / x_0**3) ) ) ) d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude) return [d_amplitude, d_x_0, d_fwhm] @property def input_units(self): if self.x_0.unit is None: return None return {self.inputs[0]: self.x_0.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "x_0": inputs_unit[self.inputs[0]], "fwhm": inputs_unit[self.inputs[0]], "amplitude": outputs_unit[self.outputs[0]], } @property def return_units(self): if self.amplitude.unit is None: return None return {self.outputs[0]: self.amplitude.unit} @x_0.validator def x_0(self, val): """Ensure `x_0` is not 0.""" if np.any(val == 0): raise InputParameterError("0 is not an allowed value for x_0") def bounding_box(self, factor=50): """Tuple defining the default ``bounding_box`` limits, ``(x_low, x_high)``. Parameters ---------- factor : float The multiple of FWHM used to define the limits. """ x0 = self.x_0 dx = factor * self.fwhm return (x0 - dx, x0 + dx) class Plummer1D(Fittable1DModel): r"""One dimensional Plummer density profile model. Parameters ---------- mass : float Total mass of cluster. 
r_plum : float Scale parameter which sets the size of the cluster core. Notes ----- Model formula: .. math:: \rho(r)=\frac{3M}{4\pi a^3}(1+\frac{r^2}{a^2})^{-5/2} References ---------- .. [1] https://ui.adsabs.harvard.edu/abs/1911MNRAS..71..460P """ mass = Parameter(default=1.0, description="Total mass of cluster") r_plum = Parameter( default=1.0, description="Scale parameter which sets the size of the cluster core", ) @staticmethod def evaluate(x, mass, r_plum): """ Evaluate plummer density profile model. """ return ( (3 * mass) / (4 * np.pi * r_plum**3) * (1 + (x / r_plum) ** 2) ** (-5 / 2) ) @staticmethod def fit_deriv(x, mass, r_plum): """ Plummer1D model derivatives. """ d_mass = 3 / ((4 * np.pi * r_plum**3) * (((x / r_plum) ** 2 + 1) ** (5 / 2))) d_r_plum = (6 * mass * x**2 - 9 * mass * r_plum**2) / ( (4 * np.pi * r_plum**6) * (1 + (x / r_plum) ** 2) ** (7 / 2) ) return [d_mass, d_r_plum] @property def input_units(self): if self.mass.unit is None and self.r_plum.unit is None: return None else: return {self.inputs[0]: self.r_plum.unit} def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return { "mass": outputs_unit[self.outputs[0]] * inputs_unit[self.inputs[0]] ** 3, "r_plum": inputs_unit[self.inputs[0]], } class NFW(Fittable1DModel): r""" Navarro–Frenk–White (NFW) profile - model for radial distribution of dark matter. Parameters ---------- mass : float or `~astropy.units.Quantity` ['mass'] Mass of NFW peak within specified overdensity radius. concentration : float Concentration of the NFW profile. redshift : float Redshift of the NFW profile. massfactor : tuple or str Mass overdensity factor and type for provided profiles: Tuple version: ("virial",) : virial radius ("critical", N) : radius where density is N times that of the critical density ("mean", N) : radius where density is N times that of the mean density String version: "virial" : virial radius "Nc" : radius where density is N times that of the critical density (e.g. "200c") "Nm" : radius where density is N times that of the mean density (e.g. "500m") cosmo : :class:`~astropy.cosmology.Cosmology` Background cosmology for density calculation. If None, the default cosmology will be used. Notes ----- Model formula: .. math:: \rho(r)=\frac{\delta_c\rho_{c}}{r/r_s(1+r/r_s)^2} References ---------- .. [1] https://arxiv.org/pdf/astro-ph/9508025 .. [2] https://en.wikipedia.org/wiki/Navarro%E2%80%93Frenk%E2%80%93White_profile .. [3] https://en.wikipedia.org/wiki/Virial_mass """ # Model Parameters # NFW Profile mass mass = Parameter( default=1.0, min=1.0, unit=u.M_sun, description="Peak mass within specified overdensity radius", ) # NFW profile concentration concentration = Parameter(default=1.0, min=1.0, description="Concentration") # NFW Profile redshift redshift = Parameter(default=0.0, min=0.0, description="Redshift") # We allow values without units to be passed when evaluating the model, and # in this case the input r values are assumed to be lengths / positions in kpc. 
_input_units_allow_dimensionless = True def __init__( self, mass=u.Quantity(mass.default, mass.unit), concentration=concentration.default, redshift=redshift.default, massfactor=("critical", 200), cosmo=None, **kwargs, ): # Set default cosmology if cosmo is None: # LOCAL from astropy.cosmology import default_cosmology cosmo = default_cosmology.get() # Set mass overdensity type and factor self._density_delta(massfactor, cosmo, redshift) # Establish mass units for density calculation (default solar masses) if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Obtain scale radius self._radius_s(mass, concentration) # Obtain scale density self._density_s(mass, concentration) super().__init__( mass=in_mass, concentration=concentration, redshift=redshift, **kwargs ) def evaluate(self, r, mass, concentration, redshift): """ One dimensional NFW profile function. Parameters ---------- r : float or `~astropy.units.Quantity` ['length'] Radial position of density to be calculated for the NFW profile. mass : float or `~astropy.units.Quantity` ['mass'] Mass of NFW peak within specified overdensity radius. concentration : float Concentration of the NFW profile. redshift : float Redshift of the NFW profile. Returns ------- density : float or `~astropy.units.Quantity` ['density'] NFW profile mass density at location ``r``. The density units are: [``mass`` / ``r`` ^3] Notes ----- .. warning:: Output values might contain ``nan`` and ``inf``. """ # Create radial version of input with dimension if hasattr(r, "unit"): in_r = r else: in_r = u.Quantity(r, u.kpc) # Define reduced radius (r / r_{\\rm s}) # also update scale radius radius_reduced = in_r / self._radius_s(mass, concentration).to(in_r.unit) # Density distribution # \rho (r)=\frac{\rho_0}{\frac{r}{R_s}\left(1~+~\frac{r}{R_s}\right)^2} # also update scale density density = self._density_s(mass, concentration) / ( radius_reduced * (u.Quantity(1.0) + radius_reduced) ** 2 ) if hasattr(mass, "unit"): return density else: return density.value def _density_delta(self, massfactor, cosmo, redshift): """ Calculate density delta. 
""" # Set mass overdensity type and factor if isinstance(massfactor, tuple): # Tuple options # ("virial") : virial radius # ("critical", N) : radius where density is N that of the critical density # ("mean", N) : radius where density is N that of the mean density if massfactor[0].lower() == "virial": # Virial Mass delta = None masstype = massfactor[0].lower() elif massfactor[0].lower() == "critical": # Critical or Mean Overdensity Mass delta = float(massfactor[1]) masstype = "c" elif massfactor[0].lower() == "mean": # Critical or Mean Overdensity Mass delta = float(massfactor[1]) masstype = "m" else: raise ValueError( f"Massfactor '{massfactor[0]}' not one of 'critical', " "'mean', or 'virial'" ) else: try: # String options # virial : virial radius # Nc : radius where density is N that of the critical density # Nm : radius where density is N that of the mean density if massfactor.lower() == "virial": # Virial Mass delta = None masstype = massfactor.lower() elif massfactor[-1].lower() == "c" or massfactor[-1].lower() == "m": # Critical or Mean Overdensity Mass delta = float(massfactor[0:-1]) masstype = massfactor[-1].lower() else: raise ValueError( f"Massfactor {massfactor} string not of the form " "'#m', '#c', or 'virial'" ) except (AttributeError, TypeError): raise TypeError(f"Massfactor {massfactor} not a tuple or string") # Set density from masstype specification if masstype == "virial": Om_c = cosmo.Om(redshift) - 1.0 d_c = 18.0 * np.pi**2 + 82.0 * Om_c - 39.0 * Om_c**2 self.density_delta = d_c * cosmo.critical_density(redshift) elif masstype == "c": self.density_delta = delta * cosmo.critical_density(redshift) elif masstype == "m": self.density_delta = ( delta * cosmo.critical_density(redshift) * cosmo.Om(redshift) ) return self.density_delta @staticmethod def A_NFW(y): r""" Dimensionless volume integral of the NFW profile, used as an intermediate step in some calculations for this model. Notes ----- Model formula: .. math:: A_{NFW} = [\ln(1+y) - \frac{y}{1+y}] """ return np.log(1.0 + y) - (y / (1.0 + y)) def _density_s(self, mass, concentration): """ Calculate scale density of the NFW profile. """ # Enforce default units if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Calculate scale density # M_{200} = 4\pi \rho_{s} R_{s}^3 \left[\ln(1+c) - \frac{c}{1+c}\right]. self.density_s = in_mass / ( 4.0 * np.pi * self._radius_s(in_mass, concentration) ** 3 * self.A_NFW(concentration) ) return self.density_s @property def rho_scale(self): r""" Scale density of the NFW profile. Often written in the literature as :math:`\rho_s`. """ return self.density_s def _radius_s(self, mass, concentration): """ Calculate scale radius of the NFW profile. """ # Enforce default units if not isinstance(mass, u.Quantity): in_mass = u.Quantity(mass, u.M_sun) else: in_mass = mass # Delta Mass is related to delta radius by # M_{200}=\frac{4}{3}\pi r_{200}^3 200 \rho_{c} # And delta radius is related to the NFW scale radius by # c = R / r_{\\rm s} self.radius_s = ( ((3.0 * in_mass) / (4.0 * np.pi * self.density_delta)) ** (1.0 / 3.0) ) / concentration # Set radial units to kiloparsec by default (unit will be rescaled by units of radius # in evaluate) return self.radius_s.to(u.kpc) @property def r_s(self): """ Scale radius of the NFW profile. """ return self.radius_s @property def r_virial(self): """ Mass factor defined virial radius of the NFW profile (R200c for M200c, Rvir for Mvir, etc.). 
""" return self.r_s * self.concentration @property def r_max(self): """ Radius of maximum circular velocity. """ return self.r_s * 2.16258 @property def v_max(self): """ Maximum circular velocity. """ return self.circular_velocity(self.r_max) def circular_velocity(self, r): r""" Circular velocities of the NFW profile. Parameters ---------- r : float or `~astropy.units.Quantity` ['length'] Radial position of velocity to be calculated for the NFW profile. Returns ------- velocity : float or `~astropy.units.Quantity` ['speed'] NFW profile circular velocity at location ``r``. The velocity units are: [km / s] Notes ----- Model formula: .. math:: v_{circ}(r)^2 = \frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)} .. math:: x = r/r_s .. warning:: Output values might contain ``nan`` and ``inf``. """ # Enforce default units (if parameters are without units) if hasattr(r, "unit"): in_r = r else: in_r = u.Quantity(r, u.kpc) # Mass factor defined velocity (i.e. V200c for M200c, Rvir for Mvir) v_profile = np.sqrt( self.mass * const.G.to(in_r.unit**3 / (self.mass.unit * u.s**2)) / self.r_virial ) # Define reduced radius (r / r_{\\rm s}) reduced_radius = in_r / self.r_virial.to(in_r.unit) # Circular velocity given by: # v^2=\frac{1}{x}\frac{\ln(1+cx)-(cx)/(1+cx)}{\ln(1+c)-c/(1+c)} # where x=r/r_{200} velocity = np.sqrt( (v_profile**2 * self.A_NFW(self.concentration * reduced_radius)) / (reduced_radius * self.A_NFW(self.concentration)) ) return velocity.to(u.km / u.s) @property def input_units(self): # The units for the 'r' variable should be a length (default kpc) return {self.inputs[0]: u.kpc} @property def return_units(self): # The units for the 'density' variable should be a matter density (default M_sun / kpc^3) if self.mass.unit is None: return {self.outputs[0]: u.M_sun / self.input_units[self.inputs[0]] ** 3} else: return { self.outputs[0]: self.mass.unit / self.input_units[self.inputs[0]] ** 3 } def _parameter_units_for_data_units(self, inputs_unit, outputs_unit): return {"mass": u.M_sun, "concentration": None, "redshift": None}
61f389b337ef8e554599cda9ca89e30949bb1366d54df23a3b9c8f89183aed18
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Sundry function and class decorators.""" import functools import inspect import textwrap import threading import types import warnings from inspect import signature from .exceptions import ( AstropyDeprecationWarning, AstropyPendingDeprecationWarning, AstropyUserWarning, ) __all__ = [ "classproperty", "deprecated", "deprecated_attribute", "deprecated_renamed_argument", "format_doc", "lazyproperty", "sharedmethod", ] _NotFound = object() def deprecated( since, message="", name="", alternative="", pending=False, obj_type=None, warning_type=AstropyDeprecationWarning, ): """ Used to mark a function or class as deprecated. To mark an attribute as deprecated, use `deprecated_attribute`. Parameters ---------- since : str The release at which this API became deprecated. This is required. message : str, optional Override the default deprecation message. The format specifier ``func`` may be used for the name of the function, and ``alternative`` may be used in the deprecation message to insert the name of an alternative to the deprecated function. ``obj_type`` may be used to insert a friendly name for the type of object being deprecated. name : str, optional The name of the deprecated function or class; if not provided the name is automatically determined from the passed in function or class, though this is useful in the case of renamed functions, where the new function is just assigned to the name of the deprecated function. For example:: def new_function(): ... oldFunction = new_function alternative : str, optional An alternative function or class name that the user may use in place of the deprecated object. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a AstropyPendingDeprecationWarning instead of a ``warning_type``. obj_type : str, optional The type of this object, if the automatically determined one needs to be overridden. warning_type : Warning Warning to be issued. Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`. """ method_types = (classmethod, staticmethod, types.MethodType) def deprecate_doc(old_doc, message): """ Returns a given docstring with a deprecation message prepended to it. """ if not old_doc: old_doc = "" old_doc = textwrap.dedent(old_doc).strip("\n") new_doc = f"\n.. deprecated:: {since}\n {message.strip()}\n\n" + old_doc if not old_doc: # This is to prevent a spurious 'unexpected unindent' warning from # docutils when the original docstring was blank. new_doc += r"\ " return new_doc def get_function(func): """ Given a function or classmethod (or other function wrapper type), get the function object. """ if isinstance(func, method_types): func = func.__func__ return func def deprecate_function(func, message, warning_type=warning_type): """ Returns a wrapped function that displays ``warning_type`` when it is called. """ if isinstance(func, method_types): func_wrapper = type(func) else: func_wrapper = lambda f: f func = get_function(func) def deprecated_func(*args, **kwargs): if pending: category = AstropyPendingDeprecationWarning else: category = warning_type warnings.warn(message, category, stacklevel=2) return func(*args, **kwargs) # If this is an extension function, we can't call # functools.wraps on it, but we normally don't care. # This crazy way to get the type of a wrapper descriptor is # straight out of the Python 3.3 inspect module docs. 
if type(func) is not type(str.__dict__["__add__"]): deprecated_func = functools.wraps(func)(deprecated_func) deprecated_func.__doc__ = deprecate_doc(deprecated_func.__doc__, message) return func_wrapper(deprecated_func) def deprecate_class(cls, message, warning_type=warning_type): """ Update the docstring and wrap the ``__init__`` in-place (or ``__new__`` if the class or any of the bases overrides ``__new__``) so it will give a deprecation warning when an instance is created. This won't work for extension classes because these can't be modified in-place and the alternatives don't work in the general case: - Using a new class that looks and behaves like the original doesn't work because the __new__ method of extension types usually makes sure that it's the same class or a subclass. - Subclassing the class and return the subclass can lead to problems with pickle and will look weird in the Sphinx docs. """ cls.__doc__ = deprecate_doc(cls.__doc__, message) if cls.__new__ is object.__new__: cls.__init__ = deprecate_function( get_function(cls.__init__), message, warning_type ) else: cls.__new__ = deprecate_function( get_function(cls.__new__), message, warning_type ) return cls def deprecate( obj, message=message, name=name, alternative=alternative, pending=pending, warning_type=warning_type, ): if obj_type is None: if isinstance(obj, type): obj_type_name = "class" elif inspect.isfunction(obj): obj_type_name = "function" elif inspect.ismethod(obj) or isinstance(obj, method_types): obj_type_name = "method" else: obj_type_name = "object" else: obj_type_name = obj_type if not name: name = get_function(obj).__name__ altmessage = "" if not message or type(message) is type(deprecate): if pending: message = ( "The {func} {obj_type} will be deprecated in a future version." ) else: message = ( "The {func} {obj_type} is deprecated and may " "be removed in a future version." ) if alternative: altmessage = f"\n Use {alternative} instead." message = ( message.format( **{ "func": name, "name": name, "alternative": alternative, "obj_type": obj_type_name, } ) ) + altmessage if isinstance(obj, type): return deprecate_class(obj, message, warning_type) else: return deprecate_function(obj, message, warning_type) if type(message) is type(deprecate): return deprecate(message) return deprecate def deprecated_attribute( name, since, message=None, alternative=None, pending=False, warning_type=AstropyDeprecationWarning, ): """ Used to mark a public attribute as deprecated. This creates a property that will warn when the given attribute name is accessed. To prevent the warning (i.e. for internal code), use the private name for the attribute by prepending an underscore (i.e. ``self._name``), or set an alternative explicitly. Parameters ---------- name : str The name of the deprecated attribute. since : str The release at which this API became deprecated. This is required. message : str, optional Override the default deprecation message. The format specifier ``name`` may be used for the name of the attribute, and ``alternative`` may be used in the deprecation message to insert the name of an alternative to the deprecated function. alternative : str, optional An alternative attribute that the user may use in place of the deprecated attribute. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a AstropyPendingDeprecationWarning instead of ``warning_type``. warning_type : Warning Warning to be issued. 
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`. Examples -------- :: class MyClass: # Mark the old_name as deprecated old_name = deprecated_attribute("old_name", "0.1") def method(self): self._old_name = 42 class MyClass2: old_name = deprecated_attribute( "old_name", "1.2", alternative="new_name" ) def method(self): self.new_name = 24 """ private_name = alternative or "_" + name specific_deprecated = deprecated( since, name=name, obj_type="attribute", message=message, alternative=alternative, pending=pending, warning_type=warning_type, ) @specific_deprecated def get(self): return getattr(self, private_name) @specific_deprecated def set(self, val): setattr(self, private_name, val) @specific_deprecated def delete(self): delattr(self, private_name) return property(get, set, delete) def deprecated_renamed_argument( old_name, new_name, since, arg_in_kwargs=False, relax=False, pending=False, warning_type=AstropyDeprecationWarning, alternative="", message="", ): """Deprecate a _renamed_ or _removed_ function argument. The decorator assumes that the argument with the ``old_name`` was removed from the function signature and the ``new_name`` replaced it at the **same position** in the signature. If the ``old_name`` argument is given when calling the decorated function the decorator will catch it and issue a deprecation warning and pass it on as ``new_name`` argument. Parameters ---------- old_name : str or sequence of str The old name of the argument. new_name : str or sequence of str or None The new name of the argument. Set this to `None` to remove the argument ``old_name`` instead of renaming it. since : str or number or sequence of str or number The release at which the old argument became deprecated. arg_in_kwargs : bool or sequence of bool, optional If the argument is not a named argument (for example it was meant to be consumed by ``**kwargs``) set this to ``True``. Otherwise the decorator will throw an Exception if the ``new_name`` cannot be found in the signature of the decorated function. Default is ``False``. relax : bool or sequence of bool, optional If ``False`` a ``TypeError`` is raised if both ``new_name`` and ``old_name`` are given. If ``True`` the value for ``new_name`` is used and a Warning is issued. Default is ``False``. pending : bool or sequence of bool, optional If ``True`` this will hide the deprecation warning and ignore the corresponding ``relax`` parameter value. Default is ``False``. warning_type : Warning Warning to be issued. Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`. alternative : str, optional An alternative function or class name that the user may use in place of the deprecated object if ``new_name`` is None. The deprecation warning will tell the user about this alternative if provided. message : str, optional A custom warning message. If provided then ``since`` and ``alternative`` options will have no effect. Raises ------ TypeError If the new argument name cannot be found in the function signature and arg_in_kwargs was False or if it is used to deprecate the name of the ``*args``-, ``**kwargs``-like arguments. At runtime such an Error is raised if both the new_name and old_name were specified when calling the function and "relax=False". Notes ----- The decorator should be applied to a function where the **name** of an argument was changed but it applies the same logic. .. warning:: If ``old_name`` is a list or tuple the ``new_name`` and ``since`` must also be a list or tuple with the same number of entries. 
``relax`` and ``arg_in_kwarg`` can be a single bool (applied to all) or also a list/tuple with the same number of entries like ``new_name``, etc. Examples -------- The deprecation warnings are not shown in the following examples. To deprecate a positional or keyword argument:: >>> from astropy.utils.decorators import deprecated_renamed_argument >>> @deprecated_renamed_argument('sig', 'sigma', '1.0') ... def test(sigma): ... return sigma >>> test(2) 2 >>> test(sigma=2) 2 >>> test(sig=2) # doctest: +SKIP 2 To deprecate an argument caught inside the ``**kwargs`` the ``arg_in_kwargs`` has to be set:: >>> @deprecated_renamed_argument('sig', 'sigma', '1.0', ... arg_in_kwargs=True) ... def test(**kwargs): ... return kwargs['sigma'] >>> test(sigma=2) 2 >>> test(sig=2) # doctest: +SKIP 2 By default providing the new and old keyword will lead to an Exception. If a Warning is desired set the ``relax`` argument:: >>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True) ... def test(sigma): ... return sigma >>> test(sig=2) # doctest: +SKIP 2 It is also possible to replace multiple arguments. The ``old_name``, ``new_name`` and ``since`` have to be `tuple` or `list` and contain the same number of entries:: >>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'], ... ['1.0', 1.2]) ... def test(alpha, beta): ... return alpha, beta >>> test(a=2, b=3) # doctest: +SKIP (2, 3) In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which is applied to all renamed arguments) or must also be a `tuple` or `list` with values for each of the arguments. """ cls_iter = (list, tuple) if isinstance(old_name, cls_iter): n = len(old_name) # Assume that new_name and since are correct (tuple/list with the # appropriate length) in the spirit of the "consenting adults". But the # optional parameters may not be set, so if these are not iterables # wrap them. if not isinstance(arg_in_kwargs, cls_iter): arg_in_kwargs = [arg_in_kwargs] * n if not isinstance(relax, cls_iter): relax = [relax] * n if not isinstance(pending, cls_iter): pending = [pending] * n if not isinstance(message, cls_iter): message = [message] * n else: # To allow a uniform approach later on, wrap all arguments in lists. n = 1 old_name = [old_name] new_name = [new_name] since = [since] arg_in_kwargs = [arg_in_kwargs] relax = [relax] pending = [pending] message = [message] def decorator(function): # The named arguments of the function. arguments = signature(function).parameters keys = list(arguments.keys()) position = [None] * n for i in range(n): # Determine the position of the argument. if arg_in_kwargs[i]: pass else: if new_name[i] is None: param = arguments[old_name[i]] elif new_name[i] in arguments: param = arguments[new_name[i]] # In case the argument is not found in the list of arguments # the only remaining possibility is that it should be caught # by some kind of **kwargs argument. # This case has to be explicitly specified, otherwise throw # an exception! else: raise TypeError( f'"{new_name[i]}" was not specified in the function ' "signature. If it was meant to be part of " '"**kwargs" then set "arg_in_kwargs" to "True"' ) # There are several possibilities now: # 1.) Positional or keyword argument: if param.kind == param.POSITIONAL_OR_KEYWORD: if new_name[i] is None: position[i] = keys.index(old_name[i]) else: position[i] = keys.index(new_name[i]) # 2.) Keyword only argument: elif param.kind == param.KEYWORD_ONLY: # These cannot be specified by position. position[i] = None # 3.) 
positional-only argument, varargs, varkwargs or some # unknown type: else: raise TypeError( f'cannot replace argument "{new_name[i]}" ' f"of kind {repr(param.kind)}." ) @functools.wraps(function) def wrapper(*args, **kwargs): for i in range(n): msg = message[i] or ( f'"{old_name[i]}" was deprecated in ' f"version {since[i]} and will be removed " "in a future version. " ) # The only way to have oldkeyword inside the function is # that it is passed as kwarg because the oldkeyword # parameter was renamed to newkeyword. if old_name[i] in kwargs: value = kwargs.pop(old_name[i]) # Display the deprecation warning only when it's not # pending. if not pending[i]: if not message[i]: if new_name[i] is not None: msg += f'Use argument "{new_name[i]}" instead.' elif alternative: msg += f"\n Use {alternative} instead." warnings.warn(msg, warning_type, stacklevel=2) # Check if the newkeyword was given as well. newarg_in_args = position[i] is not None and len(args) > position[i] newarg_in_kwargs = new_name[i] in kwargs if newarg_in_args or newarg_in_kwargs: if not pending[i]: # If both are given print a Warning if relax is # True or raise an Exception is relax is False. if relax[i]: warnings.warn( f'"{old_name[i]}" and "{new_name[i]}" ' "keywords were set. " f'Using the value of "{new_name[i]}".', AstropyUserWarning, ) else: raise TypeError( f'cannot specify both "{old_name[i]}" and ' f'"{new_name[i]}".' ) else: # Pass the value of the old argument with the # name of the new argument to the function if new_name[i] is not None: kwargs[new_name[i]] = value # If old argument has no replacement, cast it back. # https://github.com/astropy/astropy/issues/9914 else: kwargs[old_name[i]] = value # Deprecated keyword without replacement is given as # positional argument. elif ( not pending[i] and not new_name[i] and position[i] and len(args) > position[i] ): if alternative and not message[i]: msg += f"\n Use {alternative} instead." warnings.warn(msg, warning_type, stacklevel=2) return function(*args, **kwargs) return wrapper return decorator # TODO: This can still be made to work for setters by implementing an # accompanying metaclass that supports it; we just don't need that right this # second class classproperty(property): """ Similar to `property`, but allows class-level properties. That is, a property whose getter is like a `classmethod`. The wrapped method may explicitly use the `classmethod` decorator (which must become before this decorator), or the `classmethod` may be omitted (it is implicit through use of this decorator). .. note:: classproperty only works for *read-only* properties. It does not currently allow writeable/deletable properties, due to subtleties of how Python descriptors work. In order to implement such properties on a class a metaclass for that class must be implemented. Parameters ---------- fget : callable The function that computes the value of this property (in particular, the function when this is used as a decorator) a la `property`. doc : str, optional The docstring for the property--by default inherited from the getter function. lazy : bool, optional If True, caches the value returned by the first call to the getter function, so that it is only called once (used for lazy evaluation of an attribute). This is analogous to `lazyproperty`. The ``lazy`` argument can also be used when `classproperty` is used as a decorator (see the third example below). When used in the decorator syntax this *must* be passed in as a keyword argument. Examples -------- :: >>> class Foo: ... 
_bar_internal = 1 ... @classproperty ... def bar(cls): ... return cls._bar_internal + 1 ... >>> Foo.bar 2 >>> foo_instance = Foo() >>> foo_instance.bar 2 >>> foo_instance._bar_internal = 2 >>> foo_instance.bar # Ignores instance attributes 2 As previously noted, a `classproperty` is limited to implementing read-only attributes:: >>> class Foo: ... _bar_internal = 1 ... @classproperty ... def bar(cls): ... return cls._bar_internal ... @bar.setter ... def bar(cls, value): ... cls._bar_internal = value ... Traceback (most recent call last): ... NotImplementedError: classproperty can only be read-only; use a metaclass to implement modifiable class-level properties When the ``lazy`` option is used, the getter is only called once:: >>> class Foo: ... @classproperty(lazy=True) ... def bar(cls): ... print("Performing complicated calculation") ... return 1 ... >>> Foo.bar Performing complicated calculation 1 >>> Foo.bar 1 If a subclass inherits a lazy `classproperty` the property is still re-evaluated for the subclass:: >>> class FooSub(Foo): ... pass ... >>> FooSub.bar Performing complicated calculation 1 >>> FooSub.bar 1 """ def __new__(cls, fget=None, doc=None, lazy=False): if fget is None: # Being used as a decorator--return a wrapper that implements # decorator syntax def wrapper(func): return cls(func, lazy=lazy) return wrapper return super().__new__(cls) def __init__(self, fget, doc=None, lazy=False): self._lazy = lazy if lazy: self._lock = threading.RLock() # Protects _cache self._cache = {} fget = self._wrap_fget(fget) super().__init__(fget=fget, doc=doc) # There is a buglet in Python where self.__doc__ doesn't # get set properly on instances of property subclasses if # the doc argument was used rather than taking the docstring # from fget # Related Python issue: https://bugs.python.org/issue24766 if doc is not None: self.__doc__ = doc def __get__(self, obj, objtype): if self._lazy: val = self._cache.get(objtype, _NotFound) if val is _NotFound: with self._lock: # Check if another thread initialised before we locked. val = self._cache.get(objtype, _NotFound) if val is _NotFound: val = self.fget.__wrapped__(objtype) self._cache[objtype] = val else: # The base property.__get__ will just return self here; # instead we pass objtype through to the original wrapped # function (which takes the class as its sole argument) val = self.fget.__wrapped__(objtype) return val def getter(self, fget): return super().getter(self._wrap_fget(fget)) def setter(self, fset): raise NotImplementedError( "classproperty can only be read-only; use a metaclass to " "implement modifiable class-level properties" ) def deleter(self, fdel): raise NotImplementedError( "classproperty can only be read-only; use a metaclass to " "implement modifiable class-level properties" ) @staticmethod def _wrap_fget(orig_fget): if isinstance(orig_fget, classmethod): orig_fget = orig_fget.__func__ # Using stock functools.wraps instead of the fancier version # found later in this module, which is overkill for this purpose @functools.wraps(orig_fget) def fget(obj): return orig_fget(obj.__class__) return fget # Adapted from the recipe at # http://code.activestate.com/recipes/363602-lazy-property-evaluation class lazyproperty(property): """ Works similarly to property(), but computes the value only once. This essentially memorizes the value of the property by storing the result of its computation in the ``__dict__`` of the object instance. This is useful for computing the value of some property that should otherwise be invariant. 
For example:: >>> class LazyTest: ... @lazyproperty ... def complicated_property(self): ... print('Computing the value for complicated_property...') ... return 42 ... >>> lt = LazyTest() >>> lt.complicated_property Computing the value for complicated_property... 42 >>> lt.complicated_property 42 As the example shows, the second time ``complicated_property`` is accessed, the ``print`` statement is not executed. Only the return value from the first access off ``complicated_property`` is returned. By default, a setter and deleter are used which simply overwrite and delete, respectively, the value stored in ``__dict__``. Any user-specified setter or deleter is executed before executing these default actions. The one exception is that the default setter is not run if the user setter already sets the new value in ``__dict__`` and returns that value and the returned value is not ``None``. """ def __init__(self, fget, fset=None, fdel=None, doc=None): super().__init__(fget, fset, fdel, doc) self._key = self.fget.__name__ self._lock = threading.RLock() def __get__(self, obj, owner=None): try: obj_dict = obj.__dict__ val = obj_dict.get(self._key, _NotFound) if val is _NotFound: with self._lock: # Check if another thread beat us to it. val = obj_dict.get(self._key, _NotFound) if val is _NotFound: val = self.fget(obj) obj_dict[self._key] = val return val except AttributeError: if obj is None: return self raise def __set__(self, obj, val): obj_dict = obj.__dict__ if self.fset: ret = self.fset(obj, val) if ret is not None and obj_dict.get(self._key) is ret: # By returning the value set the setter signals that it # took over setting the value in obj.__dict__; this # mechanism allows it to override the input value return obj_dict[self._key] = val def __delete__(self, obj): if self.fdel: self.fdel(obj) obj.__dict__.pop(self._key, None) # Delete if present class sharedmethod(classmethod): """ This is a method decorator that allows both an instancemethod and a `classmethod` to share the same name. When using `sharedmethod` on a method defined in a class's body, it may be called on an instance, or on a class. In the former case it behaves like a normal instance method (a reference to the instance is automatically passed as the first ``self`` argument of the method):: >>> class Example: ... @sharedmethod ... def identify(self, *args): ... print('self was', self) ... print('additional args were', args) ... >>> ex = Example() >>> ex.identify(1, 2) self was <astropy.utils.decorators.Example object at 0x...> additional args were (1, 2) In the latter case, when the `sharedmethod` is called directly from a class, it behaves like a `classmethod`:: >>> Example.identify(3, 4) self was <class 'astropy.utils.decorators.Example'> additional args were (3, 4) This also supports a more advanced usage, where the `classmethod` implementation can be written separately. If the class's *metaclass* has a method of the same name as the `sharedmethod`, the version on the metaclass is delegated to:: >>> class ExampleMeta(type): ... def identify(self): ... print('this implements the {0}.identify ' ... 'classmethod'.format(self.__name__)) ... >>> class Example(metaclass=ExampleMeta): ... @sharedmethod ... def identify(self): ... print('this implements the instancemethod') ... 
>>> Example().identify() this implements the instancemethod >>> Example.identify() this implements the Example.identify classmethod """ def __get__(self, obj, objtype=None): if obj is None: mcls = type(objtype) clsmeth = getattr(mcls, self.__func__.__name__, None) if callable(clsmeth): func = clsmeth else: func = self.__func__ return self._make_method(func, objtype) else: return self._make_method(self.__func__, obj) @staticmethod def _make_method(func, instance): return types.MethodType(func, instance) def format_doc(docstring, *args, **kwargs): """ Replaces the docstring of the decorated object and then formats it. The formatting works like :meth:`str.format` and if the decorated object already has a docstring this docstring can be included in the new documentation if you use the ``{__doc__}`` placeholder. Its primary use is for reusing a *long* docstring in multiple functions when it is the same or only slightly different between them. Parameters ---------- docstring : str or object or None The docstring that will replace the docstring of the decorated object. If it is an object like a function or class it will take the docstring of this object. If it is a string it will use the string itself. One special case is if the string is ``None`` then it will use the decorated functions docstring and formats it. args : passed to :meth:`str.format`. kwargs : passed to :meth:`str.format`. If the function has a (not empty) docstring the original docstring is added to the kwargs with the keyword ``'__doc__'``. Raises ------ ValueError If the ``docstring`` (or interpreted docstring if it was ``None`` or not a string) is empty. IndexError, KeyError If a placeholder in the (interpreted) ``docstring`` was not filled. see :meth:`str.format` for more information. Notes ----- Using this decorator allows, for example Sphinx, to parse the correct docstring. Examples -------- Replacing the current docstring is very easy:: >>> from astropy.utils.decorators import format_doc >>> @format_doc('''Perform num1 + num2''') ... def add(num1, num2): ... return num1+num2 ... >>> help(add) # doctest: +SKIP Help on function add in module __main__: <BLANKLINE> add(num1, num2) Perform num1 + num2 sometimes instead of replacing you only want to add to it:: >>> doc = ''' ... {__doc__} ... Parameters ... ---------- ... num1, num2 : Numbers ... Returns ... ------- ... result: Number ... ''' >>> @format_doc(doc) ... def add(num1, num2): ... '''Perform addition.''' ... return num1+num2 ... >>> help(add) # doctest: +SKIP Help on function add in module __main__: <BLANKLINE> add(num1, num2) Perform addition. Parameters ---------- num1, num2 : Numbers Returns ------- result : Number in case one might want to format it further:: >>> doc = ''' ... Perform {0}. ... Parameters ... ---------- ... num1, num2 : Numbers ... Returns ... ------- ... result: Number ... result of num1 {op} num2 ... {__doc__} ... ''' >>> @format_doc(doc, 'addition', op='+') ... def add(num1, num2): ... return num1+num2 ... >>> @format_doc(doc, 'subtraction', op='-') ... def subtract(num1, num2): ... '''Notes: This one has additional notes.''' ... return num1-num2 ... >>> help(add) # doctest: +SKIP Help on function add in module __main__: <BLANKLINE> add(num1, num2) Perform addition. Parameters ---------- num1, num2 : Numbers Returns ------- result : Number result of num1 + num2 >>> help(subtract) # doctest: +SKIP Help on function subtract in module __main__: <BLANKLINE> subtract(num1, num2) Perform subtraction. 
Parameters ---------- num1, num2 : Numbers Returns ------- result : Number result of num1 - num2 Notes : This one has additional notes. These methods can be combined; even taking the docstring from another object is possible as docstring attribute. You just have to specify the object:: >>> @format_doc(add) ... def another_add(num1, num2): ... return num1 + num2 ... >>> help(another_add) # doctest: +SKIP Help on function another_add in module __main__: <BLANKLINE> another_add(num1, num2) Perform addition. Parameters ---------- num1, num2 : Numbers Returns ------- result : Number result of num1 + num2 But be aware that this decorator *only* formats the given docstring not the strings passed as ``args`` or ``kwargs`` (not even the original docstring):: >>> @format_doc(doc, 'addition', op='+') ... def yet_another_add(num1, num2): ... '''This one is good for {0}.''' ... return num1 + num2 ... >>> help(yet_another_add) # doctest: +SKIP Help on function yet_another_add in module __main__: <BLANKLINE> yet_another_add(num1, num2) Perform addition. Parameters ---------- num1, num2 : Numbers Returns ------- result : Number result of num1 + num2 This one is good for {0}. To work around it you could specify the docstring to be ``None``:: >>> @format_doc(None, 'addition') ... def last_add_i_swear(num1, num2): ... '''This one is good for {0}.''' ... return num1 + num2 ... >>> help(last_add_i_swear) # doctest: +SKIP Help on function last_add_i_swear in module __main__: <BLANKLINE> last_add_i_swear(num1, num2) This one is good for addition. Using it with ``None`` as docstring allows to use the decorator twice on an object to first parse the new docstring and then to parse the original docstring or the ``args`` and ``kwargs``. """ def set_docstring(obj): if docstring is None: # None means: use the objects __doc__ doc = obj.__doc__ # Delete documentation in this case so we don't end up with # awkwardly self-inserted docs. obj.__doc__ = None elif isinstance(docstring, str): # String: use the string that was given doc = docstring else: # Something else: Use the __doc__ of this doc = docstring.__doc__ if not doc: # In case the docstring is empty it's probably not what was wanted. raise ValueError( "docstring must be a string or containing a " "docstring that is not empty." ) # If the original has a not-empty docstring append it to the format # kwargs. kwargs["__doc__"] = obj.__doc__ or "" obj.__doc__ = doc.format(*args, **kwargs) return obj return set_docstring
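# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above).  A minimal, hedged
# example of how ``deprecated_renamed_argument`` and ``format_doc`` compose on
# an ordinary function.  The names ``smooth``, ``width`` and ``kernel_doc``
# are hypothetical and exist only for illustration.

from astropy.utils.decorators import deprecated_renamed_argument, format_doc

kernel_doc = """Divide ``data`` by the kernel width.

Parameters
----------
width : int
    Kernel width in pixels.
{__doc__}
"""


@format_doc(kernel_doc)
@deprecated_renamed_argument("size", "width", "5.0")
def smooth(data, width=3):
    """Notes: the ``size`` argument was renamed to ``width`` in 5.0."""
    return [x / width for x in data]


# Calling with the old keyword still works, but emits an
# AstropyDeprecationWarning and forwards the value to ``width``:
#
#     >>> smooth([3.0, 6.0], size=3)   # doctest: +SKIP
#     [1.0, 2.0]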
""" A simple class to manage a piece of global science state. See :ref:`astropy:config-developer` for more details. """ __all__ = ["ScienceState"] class _ScienceStateContext: def __init__(self, parent, value): self._value = value self._parent = parent def __enter__(self): pass def __exit__(self, type, value, tb): self._parent._value = self._value def __repr__(self): # Ensure we have a single-line repr, just in case our # value is not something simple like a string. value_repr, lb, _ = repr(self._parent._value).partition("\n") if lb: value_repr += "..." return f"<ScienceState {self._parent.__name__}: {value_repr}>" class ScienceState: """ Science state subclasses are used to manage global items that can affect science results. Subclasses will generally override `validate` to convert from any of the acceptable inputs (such as strings) to the appropriate internal objects, and set an initial value to the ``_value`` member so it has a default. Examples -------- :: class MyState(ScienceState): @classmethod def validate(cls, value): if value not in ('A', 'B', 'C'): raise ValueError("Must be one of A, B, C") return value """ def __init__(self): raise RuntimeError("This class is a singleton. Do not instantiate.") @classmethod def get(cls): """ Get the current science state value. """ return cls.validate(cls._value) @classmethod def set(cls, value): """Set the current science state value.""" # Create context with current value ctx = _ScienceStateContext(cls, cls._value) # Set new value value = cls.validate(value) cls._value = value # Return context manager return ctx @classmethod def validate(cls, value): """ Validate the value and convert it to its native type, if necessary. """ return value
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Functions related to Python runtime introspection.""" import collections import importlib import inspect import os import sys import types from importlib import metadata from packaging.version import Version from astropy.utils.decorators import deprecated_renamed_argument __all__ = ["resolve_name", "minversion", "find_current_module", "isinstancemethod"] __doctest_skip__ = ["find_current_module"] if sys.version_info[:2] >= (3, 10): from importlib.metadata import packages_distributions else: def packages_distributions(): """ Return a mapping of top-level packages to their distributions. Note: copied from https://github.com/python/importlib_metadata/pull/287. """ pkg_to_dist = collections.defaultdict(list) for dist in metadata.distributions(): for pkg in (dist.read_text("top_level.txt") or "").split(): pkg_to_dist[pkg].append(dist.metadata["Name"]) return dict(pkg_to_dist) def resolve_name(name, *additional_parts): """Resolve a name like ``module.object`` to an object and return it. This ends up working like ``from module import object`` but is easier to deal with than the `__import__` builtin and supports digging into submodules. Parameters ---------- name : `str` A dotted path to a Python object--that is, the name of a function, class, or other object in a module with the full path to that module, including parent modules, separated by dots. Also known as the fully qualified name of the object. additional_parts : iterable, optional If more than one positional arguments are given, those arguments are automatically dotted together with ``name``. Examples -------- >>> resolve_name('astropy.utils.introspection.resolve_name') <function resolve_name at 0x...> >>> resolve_name('astropy', 'utils', 'introspection', 'resolve_name') <function resolve_name at 0x...> Raises ------ `ImportError` If the module or named object is not found. """ additional_parts = ".".join(additional_parts) if additional_parts: name = name + "." + additional_parts parts = name.split(".") if len(parts) == 1: # No dots in the name--just a straight up module import cursor = 1 fromlist = [] else: cursor = len(parts) - 1 fromlist = [parts[-1]] module_name = parts[:cursor] while cursor > 0: try: ret = __import__(".".join(module_name), fromlist=fromlist) break except ImportError: if cursor == 0: raise cursor -= 1 module_name = parts[:cursor] fromlist = [parts[cursor]] ret = "" for part in parts[cursor:]: try: ret = getattr(ret, part) except AttributeError: raise ImportError(name) return ret @deprecated_renamed_argument("version_path", None, "5.0") def minversion(module, version, inclusive=True, version_path="__version__"): """ Returns `True` if the specified Python module satisfies a minimum version requirement, and `False` if not. .. deprecated:: ``version_path`` is not used anymore and is deprecated in ``astropy`` 5.0. Parameters ---------- module : module or `str` An imported module of which to check the version, or the name of that module (in which case an import of that module is attempted-- if this fails `False` is returned). version : `str` The version as a string that this module must have at a minimum (e.g. ``'0.12'``). inclusive : `bool` The specified version meets the requirement inclusively (i.e. ``>=``) as opposed to strictly greater than (default: `True`). 
Examples -------- >>> import astropy >>> minversion(astropy, '0.4.4') True """ if isinstance(module, types.ModuleType): module_name = module.__name__ module_version = getattr(module, "__version__", None) elif isinstance(module, str): module_name = module module_version = None try: module = resolve_name(module_name) except ImportError: return False else: raise ValueError( "module argument must be an actual imported " "module, or the import name of the module; " f"got {repr(module)}" ) if module_version is None: try: module_version = metadata.version(module_name) except metadata.PackageNotFoundError: # Maybe the distribution name is different from package name. # Calling packages_distributions is costly so we do it only # if necessary, as only a few packages don't have the same # distribution name. dist_names = packages_distributions() module_version = metadata.version(dist_names[module_name][0]) if inclusive: return Version(module_version) >= Version(version) else: return Version(module_version) > Version(version) def find_current_module(depth=1, finddiff=False): """ Determines the module/package from which this function is called. This function has two modes, determined by the ``finddiff`` option. it will either simply go the requested number of frames up the call stack (if ``finddiff`` is False), or it will go up the call stack until it reaches a module that is *not* in a specified set. Parameters ---------- depth : int Specifies how far back to go in the call stack (0-indexed, so that passing in 0 gives back `astropy.utils.misc`). finddiff : bool or list If False, the returned ``mod`` will just be ``depth`` frames up from the current frame. Otherwise, the function will start at a frame ``depth`` up from current, and continue up the call stack to the first module that is *different* from those in the provided list. In this case, ``finddiff`` can be a list of modules or modules names. Alternatively, it can be True, which will use the module ``depth`` call stack frames up as the module the returned module most be different from. Returns ------- mod : module or None The module object or None if the package cannot be found. The name of the module is available as the ``__name__`` attribute of the returned object (if it isn't None). Raises ------ ValueError If ``finddiff`` is a list with an invalid entry. Examples -------- The examples below assume that there are two modules in a package named ``pkg``. 
``mod1.py``:: def find1(): from astropy.utils import find_current_module print find_current_module(1).__name__ def find2(): from astropy.utils import find_current_module cmod = find_current_module(2) if cmod is None: print 'None' else: print cmod.__name__ def find_diff(): from astropy.utils import find_current_module print find_current_module(0,True).__name__ ``mod2.py``:: def find(): from .mod1 import find2 find2() With these modules in place, the following occurs:: >>> from pkg import mod1, mod2 >>> from astropy.utils import find_current_module >>> mod1.find1() pkg.mod1 >>> mod1.find2() None >>> mod2.find() pkg.mod2 >>> find_current_module(0) <module 'astropy.utils.misc' from 'astropy/utils/misc.py'> >>> mod1.find_diff() pkg.mod1 """ frm = inspect.currentframe() for i in range(depth): frm = frm.f_back if frm is None: return None if finddiff: currmod = _get_module_from_frame(frm) if finddiff is True: diffmods = [currmod] else: diffmods = [] for fd in finddiff: if inspect.ismodule(fd): diffmods.append(fd) elif isinstance(fd, str): diffmods.append(importlib.import_module(fd)) elif fd is True: diffmods.append(currmod) else: raise ValueError("invalid entry in finddiff") while frm: frmb = frm.f_back modb = _get_module_from_frame(frmb) if modb not in diffmods: return modb frm = frmb else: return _get_module_from_frame(frm) def _get_module_from_frame(frm): """Uses inspect.getmodule() to get the module that the current frame's code is running in. However, this does not work reliably for code imported from a zip file, so this provides a fallback mechanism for that case which is less reliable in general, but more reliable than inspect.getmodule() for this particular case. """ mod = inspect.getmodule(frm) if mod is not None: return mod # Check to see if we're importing from a bundle file. First ensure that # __file__ is available in globals; this is cheap to check to bail out # immediately if this fails if "__file__" in frm.f_globals and "__name__" in frm.f_globals: filename = frm.f_globals["__file__"] # Using __file__ from the frame's globals and getting it into the form # of an absolute path name with .py at the end works pretty well for # looking up the module using the same means as inspect.getmodule if filename[-4:].lower() in (".pyc", ".pyo"): filename = filename[:-4] + ".py" filename = os.path.realpath(os.path.abspath(filename)) if filename in inspect.modulesbyfile: return sys.modules.get(inspect.modulesbyfile[filename]) # On Windows, inspect.modulesbyfile appears to have filenames stored # in lowercase, so we check for this case too. if filename.lower() in inspect.modulesbyfile: return sys.modules.get(inspect.modulesbyfile[filename.lower()]) # Otherwise there are still some even trickier things that might be possible # to track down the module, but we'll leave those out unless we find a case # where it's really necessary. So return None if the module is not found. return None def find_mod_objs(modname, onlylocals=False): """Returns all the public attributes of a module referenced by name. .. note:: The returned list *not* include subpackages or modules of ``modname``, nor does it include private attributes (those that begin with '_' or are not in `__all__`). Parameters ---------- modname : str The name of the module to search. onlylocals : bool or list of str If `True`, only attributes that are either members of ``modname`` OR one of its modules or subpackages will be included. If it is a list of strings, those specify the possible packages that will be considered "local". 
Returns ------- localnames : list of str A list of the names of the attributes as they are named in the module ``modname`` . fqnames : list of str A list of the full qualified names of the attributes (e.g., ``astropy.utils.introspection.find_mod_objs``). For attributes that are simple variables, this is based on the local name, but for functions or classes it can be different if they are actually defined elsewhere and just referenced in ``modname``. objs : list of objects A list of the actual attributes themselves (in the same order as the other arguments) """ mod = resolve_name(modname) if hasattr(mod, "__all__"): pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__] else: pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != "_"] # filter out modules and pull the names and objs out ismodule = inspect.ismodule localnames = [k for k, v in pkgitems if not ismodule(v)] objs = [v for k, v in pkgitems if not ismodule(v)] # fully qualified names can be determined from the object's module fqnames = [] for obj, lnm in zip(objs, localnames): if hasattr(obj, "__module__") and hasattr(obj, "__name__"): fqnames.append(obj.__module__ + "." + obj.__name__) else: fqnames.append(modname + "." + lnm) if onlylocals: if onlylocals is True: onlylocals = [modname] valids = [any(fqn.startswith(nm) for nm in onlylocals) for fqn in fqnames] localnames = [e for i, e in enumerate(localnames) if valids[i]] fqnames = [e for i, e in enumerate(fqnames) if valids[i]] objs = [e for i, e in enumerate(objs) if valids[i]] return localnames, fqnames, objs # Note: I would have preferred call this is_instancemethod, but this naming is # for consistency with other functions in the `inspect` module def isinstancemethod(cls, obj): """ Returns `True` if the given object is an instance method of the class it is defined on (as opposed to a `staticmethod` or a `classmethod`). This requires both the class the object is a member of as well as the object itself in order to make this determination. Parameters ---------- cls : `type` The class on which this method was defined. obj : `object` A member of the provided class (the membership is not checked directly, but this function will always return `False` if the given object is not a member of the given class). Examples -------- >>> class MetaClass(type): ... def a_classmethod(cls): pass ... >>> class MyClass(metaclass=MetaClass): ... def an_instancemethod(self): pass ... ... @classmethod ... def another_classmethod(cls): pass ... ... @staticmethod ... def a_staticmethod(): pass ... >>> isinstancemethod(MyClass, MyClass.a_classmethod) False >>> isinstancemethod(MyClass, MyClass.another_classmethod) False >>> isinstancemethod(MyClass, MyClass.a_staticmethod) False >>> isinstancemethod(MyClass, MyClass.an_instancemethod) True """ return _isinstancemethod(cls, obj) def _isinstancemethod(cls, obj): if not isinstance(obj, types.FunctionType): return False # Unfortunately it seems the easiest way to get to the original # staticmethod object is to look in the class's __dict__, though we # also need to look up the MRO in case the method is not in the given # class's dict name = obj.__name__ for basecls in cls.mro(): # This includes cls if name in basecls.__dict__: return not isinstance(basecls.__dict__[name], staticmethod) # This shouldn't happen, though this is the most sensible response if # it does. raise AttributeError(name)
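# ----------------------------------------------------------------------------
# Illustrative usage sketch (hedged): a short tour of the helpers defined
# above, using only standard-library and numpy/astropy names that should be
# importable wherever astropy itself is installed.

from astropy.utils.introspection import find_mod_objs, minversion, resolve_name

# ``resolve_name`` imports as deep a module path as it can and then walks the
# remaining parts with ``getattr``:
json_dumps = resolve_name("json", "dumps")
assert json_dumps({"a": 1}) == '{"a": 1}'

# ``minversion`` accepts either an imported module or its import name:
assert minversion("numpy", "1.0")

# ``find_mod_objs`` returns local names, fully qualified names and the objects
# themselves for the public attributes of a module:
names, fqnames, objs = find_mod_objs("astropy.utils.introspection")
assert "minversion" in names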
# Licensed under a 3-clause BSD style license - see LICENSE.rst """This module contains functions and methods that relate to the DataInfo class which provides a container for informational attributes as well as summary info methods. A DataInfo object is attached to the Quantity, SkyCoord, and Time classes in astropy. Here it allows those classes to be used in Tables and uniformly carry table column attributes such as name, format, dtype, meta, and description. """ # Note: these functions and classes are tested extensively in astropy table # tests via their use in providing mixin column info, and in # astropy/tests/test_info for providing table and column info summary data. import os import re import sys import warnings import weakref from collections import OrderedDict from contextlib import contextmanager from copy import deepcopy from functools import partial from io import StringIO import numpy as np from . import metadata __all__ = [ "data_info_factory", "dtype_info_name", "BaseColumnInfo", "DataInfo", "MixinInfo", "ParentDtypeInfo", ] # Tuple of filterwarnings kwargs to ignore when calling info IGNORE_WARNINGS = ( dict( category=RuntimeWarning, message=( "All-NaN|" "Mean of empty slice|Degrees of freedom <= 0|" "invalid value encountered in sqrt" ), ), ) @contextmanager def serialize_context_as(context): """Set context for serialization. This will allow downstream code to understand the context in which a column is being serialized. Objects like Time or SkyCoord will have different default serialization representations depending on context. Parameters ---------- context : str Context name, e.g. 'fits', 'hdf5', 'parquet', 'ecsv', 'yaml' """ old_context = BaseColumnInfo._serialize_context BaseColumnInfo._serialize_context = context try: yield finally: BaseColumnInfo._serialize_context = old_context def dtype_info_name(dtype): """Return a human-oriented string name of the ``dtype`` arg. This can be use by astropy methods that present type information about a data object. The output is mostly equivalent to ``dtype.name`` which takes the form <type_name>[B] where <type_name> is like ``int`` or ``bool`` and [B] is an optional number of bits which gets included only for numeric types. The output is shown below for ``bytes`` and ``str`` types, with <N> being the number of characters. This representation corresponds to the Python type that matches the dtype:: Numpy S<N> U<N> Python bytes<N> str<N> Parameters ---------- dtype : str, `~numpy.dtype`, type Input as an object that can be converted via :class:`numpy.dtype`. Returns ------- dtype_info_name : str String name of ``dtype`` """ dtype = np.dtype(dtype) if dtype.names is not None: info_names = ", ".join(dtype_info_name(dt[0]) for dt in dtype.fields.values()) return f"({info_names})" if dtype.subdtype is not None: dtype, shape = dtype.subdtype else: shape = () if dtype.kind in ("S", "U"): type_name = "bytes" if dtype.kind == "S" else "str" length = re.search(r"(\d+)", dtype.str).group(1) out = type_name + length else: out = dtype.name if shape: out += f"[{','.join(str(n) for n in shape)}]" return out def data_info_factory(names, funcs): """ Factory to create a function that can be used as an ``option`` for outputting data object summary information. Examples -------- >>> from astropy.utils.data_info import data_info_factory >>> from astropy.table import Column >>> c = Column([4., 3., 2., 1.]) >>> mystats = data_info_factory(names=['min', 'median', 'max'], ... 
funcs=[np.min, np.median, np.max]) >>> c.info(option=mystats) min = 1 median = 2.5 max = 4 n_bad = 0 length = 4 Parameters ---------- names : list List of information attribute names funcs : list List of functions that compute the corresponding information attribute Returns ------- func : function Function that can be used as a data info option """ def func(dat): outs = [] for name, func in zip(names, funcs): try: if isinstance(func, str): out = getattr(dat, func)() else: out = func(dat) except Exception: outs.append("--") else: try: outs.append(f"{out:g}") except (TypeError, ValueError): outs.append(str(out)) return OrderedDict(zip(names, outs)) return func def _get_obj_attrs_map(obj, attrs): """ Get the values for object ``attrs`` and return as a dict. This ignores any attributes that are None. In the context of serializing the supported core astropy classes this conversion will succeed and results in more succinct and less python-specific YAML. """ out = {} for attr in attrs: val = getattr(obj, attr, None) if val is not None: out[attr] = val return out def _get_data_attribute(dat, attr=None): """ Get a data object attribute for the ``attributes`` info summary method. """ if attr == "class": val = type(dat).__name__ elif attr == "dtype": val = dtype_info_name(dat.info.dtype) elif attr == "shape": datshape = dat.shape[1:] val = datshape if datshape else "" else: val = getattr(dat.info, attr) if val is None: val = "" return str(val) class InfoAttribute: def __init__(self, attr, default=None): self.attr = attr self.default = default def __get__(self, instance, owner_cls): if instance is None: return self return instance._attrs.get(self.attr, self.default) def __set__(self, instance, value): if instance is None: # This is an unbound descriptor on the class raise ValueError("cannot set unbound descriptor") instance._attrs[self.attr] = value class ParentAttribute: def __init__(self, attr): self.attr = attr def __get__(self, instance, owner_cls): if instance is None: return self return getattr(instance._parent, self.attr) def __set__(self, instance, value): if instance is None: # This is an unbound descriptor on the class raise ValueError("cannot set unbound descriptor") setattr(instance._parent, self.attr, value) class DataInfoMeta(type): def __new__(mcls, name, bases, dct): # Ensure that we do not gain a __dict__, which would mean # arbitrary attributes could be set. dct.setdefault("__slots__", []) return super().__new__(mcls, name, bases, dct) def __init__(cls, name, bases, dct): super().__init__(name, bases, dct) # Define default getters/setters for attributes, if needed. for attr in cls.attr_names: if attr not in dct: # If not defined explicitly for this class, did any of # its superclasses define it, and, if so, was this an # automatically defined look-up-on-parent attribute? cls_attr = getattr(cls, attr, None) if attr in cls.attrs_from_parent: # If the attribute is supposed to be stored on the parent, # and that is stated by this class yet it was not the case # on the superclass, override it. 
if "attrs_from_parent" in dct and not isinstance( cls_attr, ParentAttribute ): setattr(cls, attr, ParentAttribute(attr)) elif not cls_attr or isinstance(cls_attr, ParentAttribute): # If the attribute is not meant to be stored on the parent, # and if it was not defined already or was previously defined # as an attribute on the parent, define a regular # look-up-on-info attribute setattr( cls, attr, InfoAttribute(attr, cls._attr_defaults.get(attr)) ) class DataInfo(metaclass=DataInfoMeta): """ Descriptor that data classes use to add an ``info`` attribute for storing data attributes in a uniform and portable way. Note that it *must* be called ``info`` so that the DataInfo() object can be stored in the ``instance`` using the ``info`` key. Because owner_cls.x is a descriptor, Python doesn't use __dict__['x'] normally, and the descriptor can safely store stuff there. Thanks to https://nbviewer.jupyter.org/urls/gist.github.com/ChrisBeaumont/5758381/raw/descriptor_writeup.ipynb for this trick that works for non-hashable classes. Parameters ---------- bound : bool If True this is a descriptor attribute in a class definition, else it is a DataInfo() object that is bound to a data object instance. Default is False. """ _stats = ["mean", "std", "min", "max"] attrs_from_parent = set() attr_names = {"name", "unit", "dtype", "format", "description", "meta"} _attr_defaults = {"dtype": np.dtype("O")} _attrs_no_copy = set() _info_summary_attrs = ("dtype", "shape", "unit", "format", "description", "class") __slots__ = ["_parent_cls", "_parent_ref", "_attrs"] # This specifies the list of object attributes which must be stored in # order to re-create the object after serialization. This is independent # of normal `info` attributes like name or description. Subclasses will # generally either define this statically (QuantityInfo) or dynamically # (SkyCoordInfo). These attributes may be scalars or arrays. If arrays # that match the object length they will be serialized as an independent # column. _represent_as_dict_attrs = () # This specifies attributes which are to be provided to the class # initializer as ordered args instead of keyword args. This is needed # for Quantity subclasses where the keyword for data varies (e.g. # between Quantity and Angle). _construct_from_dict_args = () # This specifies the name of an attribute which is the "primary" data. # Then when representing as columns # (table.serialize._represent_mixin_as_column) the output for this # attribute will be written with the just name of the mixin instead of the # usual "<name>.<attr>". _represent_as_dict_primary_data = None def __init__(self, bound=False): # If bound to a data object instance then create the dict of attributes # which stores the info attribute values. Default of None for "unset" # except for dtype where the default is object. if bound: self._attrs = {} @property def _parent(self): try: parent = self._parent_ref() except AttributeError: return None if parent is None: raise AttributeError( """\ failed to access "info" attribute on a temporary object. It looks like you have done something like ``col[3:5].info`` or ``col.quantity.info``, i.e. you accessed ``info`` from a temporary slice object that only exists momentarily. This has failed because the reference to that temporary object is now lost. Instead force a permanent reference (e.g. 
``c = col[3:5]`` followed by ``c.info``).""" ) return parent def __get__(self, instance, owner_cls): if instance is None: # This is an unbound descriptor on the class self._parent_cls = owner_cls return self info = instance.__dict__.get("info") if info is None: info = instance.__dict__["info"] = self.__class__(bound=True) # We set _parent_ref on every call, since if one makes copies of # instances, 'info' will be copied as well, which will lose the # reference. info._parent_ref = weakref.ref(instance) return info def __set__(self, instance, value): if instance is None: # This is an unbound descriptor on the class raise ValueError("cannot set unbound descriptor") if isinstance(value, DataInfo): info = instance.__dict__["info"] = self.__class__(bound=True) attr_names = info.attr_names if value.__class__ is self.__class__: # For same class, attributes are guaranteed to be stored in # _attrs, so speed matters up by not accessing defaults. # Doing this before difference in for loop helps speed. attr_names = attr_names & set(value._attrs) # NOT in-place! else: # For different classes, copy over the attributes in common. attr_names = attr_names & (value.attr_names - value._attrs_no_copy) for attr in attr_names - info.attrs_from_parent - info._attrs_no_copy: info._attrs[attr] = deepcopy(getattr(value, attr)) else: raise TypeError("info must be set with a DataInfo instance") def __getstate__(self): return self._attrs def __setstate__(self, state): self._attrs = state def _represent_as_dict(self, attrs=None): """Get the values for the parent ``attrs`` and return as a dict. By default, uses '_represent_as_dict_attrs'. """ if attrs is None: attrs = self._represent_as_dict_attrs return _get_obj_attrs_map(self._parent, attrs) def _construct_from_dict(self, map): args = [map.pop(attr) for attr in self._construct_from_dict_args] return self._parent_cls(*args, **map) info_summary_attributes = staticmethod( data_info_factory( names=_info_summary_attrs, funcs=[ partial(_get_data_attribute, attr=attr) for attr in _info_summary_attrs ], ) ) # No nan* methods in numpy < 1.8 info_summary_stats = staticmethod( data_info_factory( names=_stats, funcs=[getattr(np, "nan" + stat) for stat in _stats] ) ) def __call__(self, option="attributes", out=""): """ Write summary information about data object to the ``out`` filehandle. By default this prints to standard output via sys.stdout. The ``option`` argument specifies what type of information to include. This can be a string, a function, or a list of strings or functions. Built-in options are: - ``attributes``: data object attributes like ``dtype`` and ``format`` - ``stats``: basic statistics: min, mean, and max If a function is specified then that function will be called with the data object as its single argument. The function must return an OrderedDict containing the information attributes. If a list is provided then the information attributes will be appended for each of the options, in order. Examples -------- >>> from astropy.table import Column >>> c = Column([1, 2], unit='m', dtype='int32') >>> c.info() dtype = int32 unit = m class = Column n_bad = 0 length = 2 >>> c.info(['attributes', 'stats']) dtype = int32 unit = m class = Column mean = 1.5 std = 0.5 min = 1 max = 2 n_bad = 0 length = 2 Parameters ---------- option : str, callable, list of (str or callable) Info option, defaults to 'attributes'. out : file-like, None Output destination, defaults to sys.stdout. 
If None then the OrderedDict with information attributes is returned Returns ------- info : `~collections.OrderedDict` or None `~collections.OrderedDict` if out==None else None """ if out == "": out = sys.stdout dat = self._parent info = OrderedDict() name = dat.info.name if name is not None: info["name"] = name options = option if isinstance(option, (list, tuple)) else [option] for option in options: if isinstance(option, str): if hasattr(self, "info_summary_" + option): option = getattr(self, "info_summary_" + option) else: raise ValueError(f"{option=} is not an allowed information type") with warnings.catch_warnings(): for ignore_kwargs in IGNORE_WARNINGS: warnings.filterwarnings("ignore", **ignore_kwargs) info.update(option(dat)) if hasattr(dat, "mask"): n_bad = np.count_nonzero(dat.mask) else: try: n_bad = np.count_nonzero(np.isinf(dat) | np.isnan(dat)) except Exception: n_bad = 0 info["n_bad"] = n_bad try: info["length"] = len(dat) except (TypeError, IndexError): pass if out is None: return info for key, val in info.items(): if val != "": out.write(f"{key} = {val}" + os.linesep) def __repr__(self): if self._parent is None: return super().__repr__() out = StringIO() self.__call__(out=out) return out.getvalue() class BaseColumnInfo(DataInfo): """Base info class for anything that can be a column in an astropy Table. There are at least two classes that inherit from this: ColumnInfo: for native astropy Column / MaskedColumn objects MixinInfo: for mixin column objects Note that this class is defined here so that mixins can use it without importing the table package. """ attr_names = DataInfo.attr_names | {"parent_table", "indices"} _attrs_no_copy = {"parent_table", "indices"} # Context for serialization. This can be set temporarily via # ``serialize_context_as(context)`` context manager to allow downstream # code to understand the context in which a column is being serialized. # Typical values are 'fits', 'hdf5', 'parquet', 'ecsv', 'yaml'. Objects # like Time or SkyCoord will have different default serialization # representations depending on context. _serialize_context = None __slots__ = ["_format_funcs", "_copy_indices"] @property def parent_table(self): value = self._attrs.get("parent_table") if callable(value): value = value() return value @parent_table.setter def parent_table(self, parent_table): if parent_table is None: self._attrs.pop("parent_table", None) else: parent_table = weakref.ref(parent_table) self._attrs["parent_table"] = parent_table def __init__(self, bound=False): super().__init__(bound=bound) # If bound to a data object instance then add a _format_funcs dict # for caching functions for print formatting. if bound: self._format_funcs = {} def __set__(self, instance, value): # For Table columns do not set `info` when the instance is a scalar. try: if not instance.shape: return except AttributeError: pass super().__set__(instance, value) def iter_str_vals(self): """ This is a mixin-safe version of Column.iter_str_vals. """ col = self._parent if self.parent_table is None: from astropy.table.column import FORMATTER as formatter else: formatter = self.parent_table.formatter _pformat_col_iter = formatter._pformat_col_iter yield from _pformat_col_iter(col, -1, False, False, {}) @property def indices(self): # Implementation note: the auto-generation as an InfoAttribute cannot # be used here, since on access, one should not just return the # default (empty list is this case), but set _attrs['indices'] so that # if the list is appended to, it is registered here. 
return self._attrs.setdefault("indices", []) @indices.setter def indices(self, indices): self._attrs["indices"] = indices def adjust_indices(self, index, value, col_len): """ Adjust info indices after column modification. Parameters ---------- index : slice, int, list, or ndarray Element(s) of column to modify. This parameter can be a single row number, a list of row numbers, an ndarray of row numbers, a boolean ndarray (a mask), or a column slice. value : int, list, or ndarray New value(s) to insert col_len : int Length of the column """ if not self.indices: return if isinstance(index, slice): # run through each key in slice t = index.indices(col_len) keys = list(range(*t)) elif isinstance(index, np.ndarray) and index.dtype.kind == "b": # boolean mask keys = np.where(index)[0] else: # single int keys = [index] value = np.atleast_1d(value) # turn array(x) into array([x]) if value.size == 1: # repeat single value value = list(value) * len(keys) for key, val in zip(keys, value): for col_index in self.indices: col_index.replace(key, self.name, val) def slice_indices(self, col_slice, item, col_len): """ Given a sliced object, modify its indices to correctly represent the slice. Parameters ---------- col_slice : `~astropy.table.Column` or mixin Sliced object. If not a column, it must be a valid mixin, see https://docs.astropy.org/en/stable/table/mixin_columns.html item : slice, list, or ndarray Slice used to create col_slice col_len : int Length of original object """ from astropy.table.sorted_array import SortedArray if not getattr(self, "_copy_indices", True): # Necessary because MaskedArray will perform a shallow copy col_slice.info.indices = [] return col_slice elif isinstance(item, slice): col_slice.info.indices = [x[item] for x in self.indices] elif self.indices: if isinstance(item, np.ndarray) and item.dtype.kind == "b": # boolean mask item = np.where(item)[0] # Empirical testing suggests that recreating a BST/RBT index is # more effective than relabelling when less than ~60% of # the total number of rows are involved, and is in general # more effective for SortedArray. small = len(item) <= 0.6 * col_len col_slice.info.indices = [] for index in self.indices: if small or isinstance(index, SortedArray): new_index = index.get_slice(col_slice, item) else: new_index = deepcopy(index) new_index.replace_rows(item) col_slice.info.indices.append(new_index) return col_slice @staticmethod def merge_cols_attributes(cols, metadata_conflicts, name, attrs): """ Utility method to merge and validate the attributes ``attrs`` for the input table columns ``cols``. Note that ``dtype`` and ``shape`` attributes are handled specially. These should not be passed in ``attrs`` but will always be in the returned dict of merged attributes. Parameters ---------- cols : list List of input Table column objects metadata_conflicts : str ('warn'|'error'|'silent') How to handle metadata conflicts name : str Output column name attrs : list List of attribute names to be merged Returns ------- attrs : dict Of merged attributes. """ from astropy.table.np_utils import TableMergeError def warn_str_func(key, left, right): out = ( f"In merged column '{name}' the '{key}' attribute does not match " f"({left} != {right}). 
Using {right} for merged output" ) return out def getattrs(col): return { attr: getattr(col.info, attr) for attr in attrs if getattr(col.info, attr, None) is not None } out = getattrs(cols[0]) for col in cols[1:]: out = metadata.merge( out, getattrs(col), metadata_conflicts=metadata_conflicts, warn_str_func=warn_str_func, ) # Output dtype is the superset of all dtypes in in_cols out["dtype"] = metadata.common_dtype(cols) # Make sure all input shapes are the same uniq_shapes = {col.shape[1:] for col in cols} if len(uniq_shapes) != 1: raise TableMergeError("columns have different shapes") out["shape"] = uniq_shapes.pop() # "Merged" output name is the supplied name if name is not None: out["name"] = name return out def get_sortable_arrays(self): """ Return a list of arrays which can be lexically sorted to represent the order of the parent column. The base method raises NotImplementedError and must be overridden. Returns ------- arrays : list of ndarray """ raise NotImplementedError(f"column {self.name} is not sortable") class MixinInfo(BaseColumnInfo): @property def name(self): return self._attrs.get("name") @name.setter def name(self, name): # For mixin columns that live within a table, rename the column in the # table when setting the name attribute. This mirrors the same # functionality in the BaseColumn class. if self.parent_table is not None: new_name = None if name is None else str(name) self.parent_table.columns._rename_column(self.name, new_name) self._attrs["name"] = name @property def groups(self): # This implementation for mixin columns essentially matches the Column # property definition. `groups` is a read-only property here and # depends on the parent table of the column having `groups`. This will # allow aggregating mixins as long as they support those operations. from astropy.table import groups return self._attrs.setdefault("groups", groups.ColumnGroups(self._parent)) class ParentDtypeInfo(MixinInfo): """Mixin that gets info.dtype from parent.""" attrs_from_parent = {"dtype"} # dtype and unit taken from parent
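# ----------------------------------------------------------------------------
# Illustrative usage sketch (hedged): exercising ``dtype_info_name`` and a
# custom info option built with ``data_info_factory``.  ``range_stats`` is a
# hypothetical option name used only for this example.

import numpy as np

from astropy.table import Column
from astropy.utils.data_info import data_info_factory, dtype_info_name

# String dtypes are reported with the matching Python type name plus length:
assert dtype_info_name("U4") == "str4"
assert dtype_info_name(np.dtype("S3")) == "bytes3"

# Build a custom option reporting the median and the peak-to-peak range, then
# ask the column for its info as an OrderedDict instead of printing it:
range_stats = data_info_factory(names=["median", "ptp"],
                                funcs=[np.median, np.ptp])
col = Column([4.0, 3.0, 2.0, 1.0])
summary = col.info(option=range_stats, out=None)
assert float(summary["median"]) == 2.5
assert float(summary["ptp"]) == 3.0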
# Licensed under a 3-clause BSD style license - see LICENSE.rst """The ShapedLikeNDArray mixin class and shape-related functions.""" import abc import numbers from itertools import zip_longest import numpy as np __all__ = [ "NDArrayShapeMethods", "ShapedLikeNDArray", "check_broadcast", "IncompatibleShapeError", "simplify_basic_index", "unbroadcast", ] class NDArrayShapeMethods: """Mixin class to provide shape-changing methods. The class proper is assumed to have some underlying data, which are arrays or array-like structures. It must define a ``shape`` property, which gives the shape of those data, as well as an ``_apply`` method that creates a new instance in which a `~numpy.ndarray` method has been applied to those. Furthermore, for consistency with `~numpy.ndarray`, it is recommended to define a setter for the ``shape`` property, which, like the `~numpy.ndarray.shape` property allows in-place reshaping the internal data (and, unlike the ``reshape`` method raises an exception if this is not possible). This class only provides the shape-changing methods and is meant in particular for `~numpy.ndarray` subclasses that need to keep track of other arrays. For other classes, `~astropy.utils.shapes.ShapedLikeNDArray` is recommended. """ # Note to developers: if new methods are added here, be sure to check that # they work properly with the classes that use this, such as Time and # BaseRepresentation, i.e., look at their ``_apply`` methods and add # relevant tests. This is particularly important for methods that imply # copies rather than views of data (see the special-case treatment of # 'flatten' in Time). def __getitem__(self, item): return self._apply("__getitem__", item) def copy(self, *args, **kwargs): """Return an instance containing copies of the internal data. Parameters are as for :meth:`~numpy.ndarray.copy`. """ return self._apply("copy", *args, **kwargs) def reshape(self, *args, **kwargs): """Returns an instance containing the same data with a new shape. Parameters are as for :meth:`~numpy.ndarray.reshape`. Note that it is not always possible to change the shape of an array without copying the data (see :func:`~numpy.reshape` documentation). If you want an error to be raise if the data is copied, you should assign the new shape to the shape attribute (note: this may not be implemented for all classes using ``NDArrayShapeMethods``). """ return self._apply("reshape", *args, **kwargs) def ravel(self, *args, **kwargs): """Return an instance with the array collapsed into one dimension. Parameters are as for :meth:`~numpy.ndarray.ravel`. Note that it is not always possible to unravel an array without copying the data. If you want an error to be raise if the data is copied, you should should assign shape ``(-1,)`` to the shape attribute. """ return self._apply("ravel", *args, **kwargs) def flatten(self, *args, **kwargs): """Return a copy with the array collapsed into one dimension. Parameters are as for :meth:`~numpy.ndarray.flatten`. """ return self._apply("flatten", *args, **kwargs) def transpose(self, *args, **kwargs): """Return an instance with the data transposed. Parameters are as for :meth:`~numpy.ndarray.transpose`. All internal data are views of the data of the original. """ return self._apply("transpose", *args, **kwargs) @property def T(self): """Return an instance with the data transposed. Parameters are as for :attr:`~numpy.ndarray.T`. All internal data are views of the data of the original. 
""" if self.ndim < 2: return self else: return self.transpose() def swapaxes(self, *args, **kwargs): """Return an instance with the given axes interchanged. Parameters are as for :meth:`~numpy.ndarray.swapaxes`: ``axis1, axis2``. All internal data are views of the data of the original. """ return self._apply("swapaxes", *args, **kwargs) def diagonal(self, *args, **kwargs): """Return an instance with the specified diagonals. Parameters are as for :meth:`~numpy.ndarray.diagonal`. All internal data are views of the data of the original. """ return self._apply("diagonal", *args, **kwargs) def squeeze(self, *args, **kwargs): """Return an instance with single-dimensional shape entries removed. Parameters are as for :meth:`~numpy.ndarray.squeeze`. All internal data are views of the data of the original. """ return self._apply("squeeze", *args, **kwargs) def take(self, indices, axis=None, out=None, mode="raise"): """Return a new instance formed from the elements at the given indices. Parameters are as for :meth:`~numpy.ndarray.take`, except that, obviously, no output array can be given. """ if out is not None: return NotImplementedError("cannot pass 'out' argument to 'take.") return self._apply("take", indices, axis=axis, mode=mode) class ShapedLikeNDArray(NDArrayShapeMethods, metaclass=abc.ABCMeta): """Mixin class to provide shape-changing methods. The class proper is assumed to have some underlying data, which are arrays or array-like structures. It must define a ``shape`` property, which gives the shape of those data, as well as an ``_apply`` method that creates a new instance in which a `~numpy.ndarray` method has been applied to those. Furthermore, for consistency with `~numpy.ndarray`, it is recommended to define a setter for the ``shape`` property, which, like the `~numpy.ndarray.shape` property allows in-place reshaping the internal data (and, unlike the ``reshape`` method raises an exception if this is not possible). This class also defines default implementations for ``ndim`` and ``size`` properties, calculating those from the ``shape``. These can be overridden by subclasses if there are faster ways to obtain those numbers. """ # Note to developers: if new methods are added here, be sure to check that # they work properly with the classes that use this, such as Time and # BaseRepresentation, i.e., look at their ``_apply`` methods and add # relevant tests. This is particularly important for methods that imply # copies rather than views of data (see the special-case treatment of # 'flatten' in Time). @property @abc.abstractmethod def shape(self): """The shape of the underlying data.""" @abc.abstractmethod def _apply(method, *args, **kwargs): """Create a new instance, with ``method`` applied to underlying data. The method is any of the shape-changing methods for `~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those picking particular elements (``__getitem__``, ``take``, etc.). It will be applied to the underlying arrays (e.g., ``jd1`` and ``jd2`` in `~astropy.time.Time`), with the results used to create a new instance. Parameters ---------- method : str Method to be applied to the instance's internal data arrays. args : tuple Any positional arguments for ``method``. kwargs : dict Any keyword arguments for ``method``. 
""" @property def ndim(self): """The number of dimensions of the instance and underlying arrays.""" return len(self.shape) @property def size(self): """The size of the object, as calculated from its shape.""" size = 1 for sh in self.shape: size *= sh return size @property def isscalar(self): return self.shape == () def __len__(self): if self.isscalar: raise TypeError(f"Scalar {self.__class__.__name__!r} object has no len()") return self.shape[0] def __bool__(self): """Any instance should evaluate to True, except when it is empty.""" return self.size > 0 def __getitem__(self, item): try: return self._apply("__getitem__", item) except IndexError: if self.isscalar: raise TypeError( f"scalar {self.__class.__name__!r} object is not subscriptable." ) else: raise def __iter__(self): if self.isscalar: raise TypeError( f"scalar {self.__class__.__name__!r} object is not iterable." ) # We cannot just write a generator here, since then the above error # would only be raised once we try to use the iterator, rather than # upon its definition using iter(self). def self_iter(): for idx in range(len(self)): yield self[idx] return self_iter() # Functions that change shape or essentially do indexing. _APPLICABLE_FUNCTIONS = { np.moveaxis, np.rollaxis, np.atleast_1d, np.atleast_2d, np.atleast_3d, np.expand_dims, np.broadcast_to, np.flip, np.fliplr, np.flipud, np.rot90, np.roll, np.delete, } # Functions that themselves defer to a method. Those are all # defined in np.core.fromnumeric, but exclude alen as well as # sort and partition, which make copies before calling the method. _METHOD_FUNCTIONS = { getattr(np, name): { "amax": "max", "amin": "min", "around": "round", "round_": "round", "alltrue": "all", "sometrue": "any", }.get(name, name) for name in np.core.fromnumeric.__all__ if name not in ["alen", "sort", "partition"] } # Add np.copy, which we may as well let defer to our method. _METHOD_FUNCTIONS[np.copy] = "copy" # Could be made to work with a bit of effort: # np.where, np.compress, np.extract, # np.diag_indices_from, np.triu_indices_from, np.tril_indices_from # np.tile, np.repeat (need .repeat method) # TODO: create a proper implementation. # Furthermore, some arithmetic functions such as np.mean, np.median, # could work for Time, and many more for TimeDelta, so those should # override __array_function__. def __array_function__(self, function, types, args, kwargs): """Wrap numpy functions that make sense.""" if function in self._APPLICABLE_FUNCTIONS: if function is np.broadcast_to: # Ensure that any ndarray subclasses used are # properly propagated. kwargs.setdefault("subok", True) elif ( function in {np.atleast_1d, np.atleast_2d, np.atleast_3d} and len(args) > 1 ): return tuple(function(arg, **kwargs) for arg in args) if self is not args[0]: return NotImplemented return self._apply(function, *args[1:], **kwargs) # For functions that defer to methods, use the corresponding # method/attribute if we have it. Otherwise, fall through. if self is args[0] and function in self._METHOD_FUNCTIONS: method = getattr(self, self._METHOD_FUNCTIONS[function], None) if method is not None: if callable(method): return method(*args[1:], **kwargs) else: # For np.shape, etc., just return the attribute. return method # Fall-back, just pass the arguments on since perhaps the function # works already (see above). 
return function.__wrapped__(*args, **kwargs) class IncompatibleShapeError(ValueError): def __init__(self, shape_a, shape_a_idx, shape_b, shape_b_idx): super().__init__(shape_a, shape_a_idx, shape_b, shape_b_idx) def check_broadcast(*shapes): """ Determines whether two or more Numpy arrays can be broadcast with each other based on their shape tuple alone. Parameters ---------- *shapes : tuple All shapes to include in the comparison. If only one shape is given it is passed through unmodified. If no shapes are given returns an empty `tuple`. Returns ------- broadcast : `tuple` If all shapes are mutually broadcastable, returns a tuple of the full broadcast shape. """ if len(shapes) == 0: return () elif len(shapes) == 1: return shapes[0] reversed_shapes = (reversed(shape) for shape in shapes) full_shape = [] for dims in zip_longest(*reversed_shapes, fillvalue=1): max_dim = 1 max_dim_idx = None for idx, dim in enumerate(dims): if dim == 1: continue if max_dim == 1: # The first dimension of size greater than 1 max_dim = dim max_dim_idx = idx elif dim != max_dim: raise IncompatibleShapeError( shapes[max_dim_idx], max_dim_idx, shapes[idx], idx ) full_shape.append(max_dim) return tuple(full_shape[::-1]) def unbroadcast(array): """ Given an array, return a new array that is the smallest subset of the original array that can be re-broadcasted back to the original array. See https://stackoverflow.com/questions/40845769/un-broadcasting-numpy-arrays for more details. """ if array.ndim == 0: return array array = array[ tuple((slice(0, 1) if stride == 0 else slice(None)) for stride in array.strides) ] # Remove leading ones, which are not needed in numpy broadcasting. first_not_unity = next( (i for (i, s) in enumerate(array.shape) if s > 1), array.ndim ) return array.reshape(array.shape[first_not_unity:]) def simplify_basic_index(basic_index, *, shape): """ Given a Numpy basic index, return a tuple of integers and slice objects with no default values (`None`) if possible. If one of the dimensions has a slice and the step is negative and the stop value of the slice was originally `None`, the new stop value of the slice may still be set to `None`. For more information on valid basic indices, see https://numpy.org/doc/stable/user/basics.indexing.html#basic-indexing Parameters ---------- basic_index A valid Numpy basic index shape The shape of the array being indexed """ ndim = len(shape) if not isinstance(basic_index, (tuple, list)): # We just have a single int basic_index = (basic_index,) new_index = list(basic_index) if Ellipsis in new_index: if new_index.count(Ellipsis) > 1: raise IndexError("an index can only have a single ellipsis ('...')") # Replace the Ellipsis with the correct number of slice(None)s e_ind = new_index.index(Ellipsis) new_index.remove(Ellipsis) n_e = ndim - len(new_index) for i in range(n_e): ind = e_ind + i new_index.insert(ind, slice(0, shape[ind], 1)) if len(new_index) > ndim: raise ValueError( f"The dimensionality of the basic index {basic_index} can not be greater " f"than the dimensionality ({ndim}) of the data." ) for i in range(ndim): if i < len(new_index): slc = new_index[i] if isinstance(slc, slice): indices = list(slc.indices(shape[i])) # The following case is the only one where slice(*indices) does # not give the 'correct' answer because it will set stop to -1 # which means the last element in the array. 
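                # (That happens for a negative step whose original stop was
                # ``None``: reconstructing ``slice(start, -1, step)`` would be
                # read as stopping at the last element, so the stop is
                # restored to ``None`` just below.)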
if indices[1] == -1: indices[1] = None new_index[i] = slice(*indices) elif isinstance(slc, numbers.Integral): new_index[i] = np.core.multiarray.normalize_axis_index( int(slc), shape[i] ) else: raise ValueError(f"Unexpected index element in basic index: {slc}") else: new_index.append(slice(0, shape[i], 1)) return tuple(new_index)
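
# Minimal usage sketch (not part of astropy itself): illustrates the helper
# functions above with arbitrary example shapes.  It reuses the module-level
# ``np`` import and only runs when this file is executed directly.
if __name__ == "__main__":
    # Mutually broadcastable shapes yield the full broadcast shape.
    assert check_broadcast((3, 1), (1, 4)) == (3, 4)

    # ``unbroadcast`` strips stride-0 (broadcast) axes down to the smallest
    # array that can be re-broadcast to the original shape.
    big = np.broadcast_to(np.arange(4), (5, 4))
    small = unbroadcast(big)
    assert small.shape == (4,)
    assert (np.broadcast_to(small, big.shape) == big).all()

    # ``simplify_basic_index`` expands the Ellipsis and fills in an explicit
    # integer or slice for every axis.
    assert simplify_basic_index((..., 2), shape=(5, 6, 7)) == (
        slice(0, 5, 1),
        slice(0, 6, 1),
        2,
    )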
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Functions for accessing, downloading, and caching data files.""" import atexit import contextlib import errno import fnmatch import ftplib import functools import hashlib import io import os import re import shutil # import ssl moved inside functions using ssl to avoid import failure # when running in pyodide/Emscripten import sys import urllib.error import urllib.parse import urllib.request import zipfile from tempfile import NamedTemporaryFile, TemporaryDirectory, gettempdir, mkdtemp from warnings import warn try: import certifi except ImportError: # certifi support is optional; when available it will be used for TLS/SSL # downloads certifi = None import astropy.config.paths from astropy import config as _config from astropy.utils.compat.optional_deps import HAS_FSSPEC from astropy.utils.exceptions import AstropyWarning from astropy.utils.introspection import find_current_module, resolve_name # Order here determines order in the autosummary __all__ = [ "Conf", "conf", "download_file", "download_files_in_parallel", "get_readable_fileobj", "get_pkg_data_fileobj", "get_pkg_data_filename", "get_pkg_data_contents", "get_pkg_data_fileobjs", "get_pkg_data_filenames", "get_pkg_data_path", "is_url", "is_url_in_cache", "get_cached_urls", "cache_total_size", "cache_contents", "export_download_cache", "import_download_cache", "import_file_to_cache", "check_download_cache", "clear_download_cache", "compute_hash", "get_free_space_in_dir", "check_free_space_in_dir", "get_file_contents", "CacheMissingWarning", "CacheDamaged", ] _dataurls_to_alias = {} class _NonClosingBufferedReader(io.BufferedReader): def __del__(self): try: # NOTE: self.raw will not be closed, but left in the state # it was in at detactment self.detach() except Exception: pass class _NonClosingTextIOWrapper(io.TextIOWrapper): def __del__(self): try: # NOTE: self.stream will not be closed, but left in the state # it was in at detactment self.detach() except Exception: pass class Conf(_config.ConfigNamespace): """ Configuration parameters for `astropy.utils.data`. """ dataurl = _config.ConfigItem( "http://data.astropy.org/", "Primary URL for astropy remote data site." ) dataurl_mirror = _config.ConfigItem( "http://www.astropy.org/astropy-data/", "Mirror URL for astropy remote data site.", ) default_http_user_agent = _config.ConfigItem( "astropy", "Default User-Agent for HTTP request headers. This can be overwritten " "for a particular call via http_headers option, where available. " "This only provides the default value when not set by https_headers.", ) remote_timeout = _config.ConfigItem( 10.0, "Time to wait for remote data queries (in seconds).", aliases=["astropy.coordinates.name_resolve.name_resolve_timeout"], ) allow_internet = _config.ConfigItem( True, "If False, prevents any attempt to download from Internet." ) compute_hash_block_size = _config.ConfigItem( 2**16, "Block size for computing file hashes." # 64K ) download_block_size = _config.ConfigItem( 2**16, "Number of bytes of remote data to download per step." # 64K ) delete_temporary_downloads_at_exit = _config.ConfigItem( True, "If True, temporary download files created when the cache is " "inaccessible will be deleted at the end of the python session.", ) conf = Conf() class CacheMissingWarning(AstropyWarning): """ This warning indicates the standard cache directory is not accessible, with the first argument providing the warning message. 
If args[1] is present, it is a filename indicating the path to a temporary file that was created to store a remote data download in the absence of the cache. """ def is_url(string): """ Test whether a string is a valid URL for :func:`download_file`. Parameters ---------- string : str The string to test. Returns ------- status : bool String is URL or not. """ url = urllib.parse.urlparse(string) # we can't just check that url.scheme is not an empty string, because # file paths in windows would return a non-empty scheme (e.g. e:\\ # returns 'e'). return url.scheme.lower() in ["http", "https", "ftp", "sftp", "ssh", "file"] # Backward compatibility because some downstream packages allegedly uses it. _is_url = is_url def _requires_fsspec(url): """Does the `url` require the optional ``fsspec`` dependency to open?""" return isinstance(url, str) and url.startswith(("s3://", "gs://")) def _is_inside(path, parent_path): # We have to try realpath too to avoid issues with symlinks, but we leave # abspath because some systems like debian have the absolute path (with no # symlinks followed) match, but the real directories in different # locations, so need to try both cases. return os.path.abspath(path).startswith( os.path.abspath(parent_path) ) or os.path.realpath(path).startswith(os.path.realpath(parent_path)) @contextlib.contextmanager def get_readable_fileobj( name_or_obj, encoding=None, cache=False, show_progress=True, remote_timeout=None, sources=None, http_headers=None, *, use_fsspec=None, fsspec_kwargs=None, close_files=True, ): """Yield a readable, seekable file-like object from a file or URL. This supports passing filenames, URLs, and readable file-like objects, any of which can be compressed in gzip, bzip2 or lzma (xz) if the appropriate compression libraries are provided by the Python installation. Notes ----- This function is a context manager, and should be used for example as:: with get_readable_fileobj('file.dat') as f: contents = f.read() If a URL is provided and the cache is in use, the provided URL will be the name used in the cache. The contents may already be stored in the cache under this URL provided, they may be downloaded from this URL, or they may be downloaded from one of the locations listed in ``sources``. See `~download_file` for details. Parameters ---------- name_or_obj : str or file-like The filename of the file to access (if given as a string), or the file-like object to access. If a file-like object, it must be opened in binary mode. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method that returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. cache : bool or "update", optional Whether to cache the contents of remote URLs. If "update", check the remote URL for a new version but store the result in the cache. show_progress : bool, optional Whether to display a progress bar if the file is downloaded from a remote server. Default is `True`. remote_timeout : float Timeout for remote requests in seconds (default is the configurable `astropy.utils.data.Conf.remote_timeout`). 
sources : list of str, optional If provided, a list of URLs to try to obtain the file from. The result will be stored under the original URL. The original URL will *not* be tried unless it is in this list; this is to prevent long waits for a primary server that is known to be inaccessible at the moment. http_headers : dict or None HTTP request headers to pass into ``urlopen`` if needed. (These headers are ignored if the protocol for the ``name_or_obj``/``sources`` entry is not a remote HTTP URL.) In the default case (None), the headers are ``User-Agent: some_value`` and ``Accept: */*``, where ``some_value`` is set by ``astropy.utils.data.conf.default_http_user_agent``. use_fsspec : bool, optional Use `fsspec.open` to open the file? Defaults to `False` unless ``name_or_obj`` starts with the Amazon S3 storage prefix ``s3://`` or the Google Cloud Storage prefix ``gs://``. Can also be used for paths with other prefixes (e.g. ``http://``) but in this case you must explicitly pass ``use_fsspec=True``. Use of this feature requires the optional ``fsspec`` package. A ``ModuleNotFoundError`` will be raised if the dependency is missing. .. versionadded:: 5.2 fsspec_kwargs : dict, optional Keyword arguments passed on to `fsspec.open`. This can be used to configure cloud storage credentials and caching behavior. For example, pass ``fsspec_kwargs={"anon": True}`` to enable anonymous access to Amazon S3 open data buckets. See ``fsspec``'s documentation for available parameters. .. versionadded:: 5.2 close_files : bool, optional Close the file object when exiting the context manager. Default is `True`. .. versionadded:: 5.2 Returns ------- file : readable file-like """ # close_fds is a list of file handles created by this function # that need to be closed. We don't want to always just close the # returned file handle, because it may simply be the file handle # passed in. In that case it is not the responsibility of this # function to close it: doing so could result in a "double close" # and an "invalid file descriptor" exception. close_fds = [] delete_fds = [] if remote_timeout is None: # use configfile default remote_timeout = conf.remote_timeout # Have `use_fsspec` default to ``True`` if the user passed an Amazon S3 # or Google Cloud Storage URI. if use_fsspec is None and _requires_fsspec(name_or_obj): use_fsspec = True if use_fsspec: if not isinstance(name_or_obj, str): raise TypeError("`name_or_obj` must be a string when `use_fsspec=True`") if fsspec_kwargs is None: fsspec_kwargs = {} # name_or_obj could be an os.PathLike object if isinstance(name_or_obj, os.PathLike): name_or_obj = os.fspath(name_or_obj) # Get a file object to the content if isinstance(name_or_obj, str): # Use fsspec to open certain cloud-hosted files (e.g., AWS S3, Google Cloud Storage) if use_fsspec: if not HAS_FSSPEC: raise ModuleNotFoundError("please install `fsspec` to open this file") import fsspec # local import because it is a niche dependency openfileobj = fsspec.open(name_or_obj, **fsspec_kwargs) close_fds.append(openfileobj) fileobj = openfileobj.open() close_fds.append(fileobj) else: is_url = _is_url(name_or_obj) if is_url: name_or_obj = download_file( name_or_obj, cache=cache, show_progress=show_progress, timeout=remote_timeout, sources=sources, http_headers=http_headers, ) fileobj = io.FileIO(name_or_obj, "r") if is_url and not cache: delete_fds.append(fileobj) close_fds.append(fileobj) else: fileobj = name_or_obj # Check if the file object supports random access, and if not, # then wrap it in a BytesIO buffer. 
It would be nicer to use a # BufferedReader to avoid reading loading the whole file first, # but that might not be compatible with all possible I/O classes. if not hasattr(fileobj, "seek"): try: # py.path.LocalPath objects have .read() method but it uses # text mode, which won't work. .read_binary() does, and # surely other ducks would return binary contents when # called like this. # py.path.LocalPath is what comes from the legacy tmpdir fixture # in pytest. fileobj = io.BytesIO(fileobj.read_binary()) except AttributeError: fileobj = io.BytesIO(fileobj.read()) # Now read enough bytes to look at signature signature = fileobj.read(4) fileobj.seek(0) if signature[:3] == b"\x1f\x8b\x08": # gzip import struct try: import gzip fileobj_new = gzip.GzipFile(fileobj=fileobj, mode="rb") fileobj_new.read(1) # need to check that the file is really gzip except (OSError, EOFError, struct.error): # invalid gzip file fileobj.seek(0) fileobj_new.close() else: fileobj_new.seek(0) fileobj = fileobj_new elif signature[:3] == b"BZh": # bzip2 try: import bz2 except ImportError: for fd in close_fds: fd.close() raise ModuleNotFoundError( "This Python installation does not provide the bz2 module." ) try: # bz2.BZ2File does not support file objects, only filenames, so we # need to write the data to a temporary file with NamedTemporaryFile("wb", delete=False) as tmp: tmp.write(fileobj.read()) tmp.close() fileobj_new = bz2.BZ2File(tmp.name, mode="rb") fileobj_new.read(1) # need to check that the file is really bzip2 except OSError: # invalid bzip2 file fileobj.seek(0) fileobj_new.close() # raise else: fileobj_new.seek(0) close_fds.append(fileobj_new) fileobj = fileobj_new elif signature[:3] == b"\xfd7z": # xz try: import lzma fileobj_new = lzma.LZMAFile(fileobj, mode="rb") fileobj_new.read(1) # need to check that the file is really xz except ImportError: for fd in close_fds: fd.close() raise ModuleNotFoundError( "This Python installation does not provide the lzma module." ) except (OSError, EOFError): # invalid xz file fileobj.seek(0) fileobj_new.close() # should we propagate this to the caller to signal bad content? # raise ValueError(e) else: fileobj_new.seek(0) fileobj = fileobj_new # By this point, we have a file, io.FileIO, gzip.GzipFile, bz2.BZ2File # or lzma.LZMAFile instance opened in binary mode (that is, read # returns bytes). Now we need to, if requested, wrap it in a # io.TextIOWrapper so read will return unicode based on the # encoding parameter. needs_textio_wrapper = encoding != "binary" if needs_textio_wrapper: # A bz2.BZ2File can not be wrapped by a TextIOWrapper, # so we decompress it to a temporary file and then # return a handle to that. try: import bz2 except ImportError: pass else: if isinstance(fileobj, bz2.BZ2File): tmp = NamedTemporaryFile("wb", delete=False) data = fileobj.read() tmp.write(data) tmp.close() delete_fds.append(tmp) fileobj = io.FileIO(tmp.name, "r") close_fds.append(fileobj) fileobj = _NonClosingBufferedReader(fileobj) fileobj = _NonClosingTextIOWrapper(fileobj, encoding=encoding) # Ensure that file is at the start - io.FileIO will for # example not always be at the start: # >>> import io # >>> f = open('test.fits', 'rb') # >>> f.read(4) # 'SIMP' # >>> f.seek(0) # >>> fileobj = io.FileIO(f.fileno()) # >>> fileobj.tell() # 4096L fileobj.seek(0) try: yield fileobj finally: if close_files: for fd in close_fds: fd.close() for fd in delete_fds: os.remove(fd.name) def get_file_contents(*args, **kwargs): """ Retrieves the contents of a filename or file-like object. 
See the `get_readable_fileobj` docstring for details on parameters. Returns ------- object The content of the file (as requested by ``encoding``). """ with get_readable_fileobj(*args, **kwargs) as f: return f.read() @contextlib.contextmanager def get_pkg_data_fileobj(data_name, package=None, encoding=None, cache=True): """ Retrieves a data file from the standard locations for the package and provides the file as a file-like object that reads bytes. Parameters ---------- data_name : str Name/location of the desired data file. One of the following: * The name of a data file included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data/file.dat'`` to get the file in ``astropy/pkgname/data/file.dat``. Double-dots can be used to go up a level. In the same example, use ``'../data/file.dat'`` to get ``astropy/data/file.dat``. * If a matching local file does not exist, the Astropy data server will be queried for the file. * A hash like that produced by `compute_hash` can be requested, prefixed by 'hash/' e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash will first be searched for locally, and if not found, the Astropy data server will be queried. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. cache : bool If True, the file will be downloaded and saved locally or the already-cached local copy will be accessed. If False, the file-like object will directly access the resource (e.g. if a remote URL is accessed, an object like that from `urllib.request.urlopen` is returned). Returns ------- fileobj : file-like An object with the contents of the data file available via ``read`` function. Can be used as part of a ``with`` statement, automatically closing itself after the ``with`` block. Raises ------ urllib.error.URLError If a remote file cannot be found. OSError If problems occur writing or reading a local file. Examples -------- This will retrieve a data file and its contents for the `astropy.wcs` tests:: >>> from astropy.utils.data import get_pkg_data_fileobj >>> with get_pkg_data_fileobj('data/3d_cd.hdr', ... package='astropy.wcs.tests') as fobj: ... fcontents = fobj.read() ... This next example would download a data file from the astropy data server because the ``allsky/allsky_rosat.fits`` file is not present in the source distribution. It will also save the file locally so the next time it is accessed it won't need to be downloaded.:: >>> from astropy.utils.data import get_pkg_data_fileobj >>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits', ... encoding='binary') as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT ... fcontents = fobj.read() ... Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done] This does the same thing but does *not* cache it locally:: >>> with get_pkg_data_fileobj('allsky/allsky_rosat.fits', ... 
encoding='binary', cache=False) as fobj: # doctest: +REMOTE_DATA +IGNORE_OUTPUT ... fcontents = fobj.read() ... Downloading http://data.astropy.org/allsky/allsky_rosat.fits [Done] See Also -------- get_pkg_data_contents : returns the contents of a file or url as a bytes object get_pkg_data_filename : returns a local name for a file containing the data """ datafn = get_pkg_data_path(data_name, package=package) if os.path.isdir(datafn): raise OSError( "Tried to access a data file that's actually a package data directory" ) elif os.path.isfile(datafn): # local file with get_readable_fileobj(datafn, encoding=encoding) as fileobj: yield fileobj else: # remote file with get_readable_fileobj( conf.dataurl + data_name, encoding=encoding, cache=cache, sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name], ) as fileobj: # We read a byte to trigger any URLErrors fileobj.read(1) fileobj.seek(0) yield fileobj def get_pkg_data_filename( data_name, package=None, show_progress=True, remote_timeout=None ): """ Retrieves a data file from the standard locations for the package and provides a local filename for the data. This function is similar to `get_pkg_data_fileobj` but returns the file *name* instead of a readable file-like object. This means that this function must always cache remote files locally, unlike `get_pkg_data_fileobj`. Parameters ---------- data_name : str Name/location of the desired data file. One of the following: * The name of a data file included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data/file.dat'`` to get the file in ``astropy/pkgname/data/file.dat``. Double-dots can be used to go up a level. In the same example, use ``'../data/file.dat'`` to get ``astropy/data/file.dat``. * If a matching local file does not exist, the Astropy data server will be queried for the file. * A hash like that produced by `compute_hash` can be requested, prefixed by 'hash/' e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash will first be searched for locally, and if not found, the Astropy data server will be queried. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. show_progress : bool, optional Whether to display a progress bar if the file is downloaded from a remote server. Default is `True`. remote_timeout : float Timeout for the requests in seconds (default is the configurable `astropy.utils.data.Conf.remote_timeout`). Raises ------ urllib.error.URLError If a remote file cannot be found. OSError If problems occur writing or reading a local file. Returns ------- filename : str A file path on the local file system corresponding to the data requested in ``data_name``. Examples -------- This will retrieve the contents of the data file for the `astropy.wcs` tests:: >>> from astropy.utils.data import get_pkg_data_filename >>> fn = get_pkg_data_filename('data/3d_cd.hdr', ... package='astropy.wcs.tests') >>> with open(fn) as f: ... fcontents = f.read() ... This retrieves a data file by hash either locally or from the astropy data server:: >>> from astropy.utils.data import get_pkg_data_filename >>> fn = get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28') # doctest: +SKIP >>> with open(fn) as f: ... fcontents = f.read() ... 
See Also -------- get_pkg_data_contents : returns the contents of a file or url as a bytes object get_pkg_data_fileobj : returns a file-like object with the data """ if remote_timeout is None: # use configfile default remote_timeout = conf.remote_timeout if data_name.startswith("hash/"): # first try looking for a local version if a hash is specified hashfn = _find_hash_fn(data_name[5:]) if hashfn is None: return download_file( conf.dataurl + data_name, cache=True, show_progress=show_progress, timeout=remote_timeout, sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name], ) else: return hashfn else: fs_path = os.path.normpath(data_name) datafn = get_pkg_data_path(fs_path, package=package) if os.path.isdir(datafn): raise OSError( "Tried to access a data file that's actually a package data directory" ) elif os.path.isfile(datafn): # local file return datafn else: # remote file return download_file( conf.dataurl + data_name, cache=True, show_progress=show_progress, timeout=remote_timeout, sources=[conf.dataurl + data_name, conf.dataurl_mirror + data_name], ) def get_pkg_data_contents(data_name, package=None, encoding=None, cache=True): """ Retrieves a data file from the standard locations and returns its contents as a bytes object. Parameters ---------- data_name : str Name/location of the desired data file. One of the following: * The name of a data file included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data/file.dat'`` to get the file in ``astropy/pkgname/data/file.dat``. Double-dots can be used to go up a level. In the same example, use ``'../data/file.dat'`` to get ``astropy/data/file.dat``. * If a matching local file does not exist, the Astropy data server will be queried for the file. * A hash like that produced by `compute_hash` can be requested, prefixed by 'hash/' e.g. 'hash/34c33b3eb0d56eb9462003af249eff28'. The hash will first be searched for locally, and if not found, the Astropy data server will be queried. * A URL to some other file. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method that returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. cache : bool If True, the file will be downloaded and saved locally or the already-cached local copy will be accessed. If False, the file-like object will directly access the resource (e.g. if a remote URL is accessed, an object like that from `urllib.request.urlopen` is returned). Returns ------- contents : bytes The complete contents of the file as a bytes object. Raises ------ urllib.error.URLError If a remote file cannot be found. OSError If problems occur writing or reading a local file. 
See Also -------- get_pkg_data_fileobj : returns a file-like object with the data get_pkg_data_filename : returns a local name for a file containing the data """ with get_pkg_data_fileobj( data_name, package=package, encoding=encoding, cache=cache ) as fd: contents = fd.read() return contents def get_pkg_data_filenames(datadir, package=None, pattern="*"): """ Returns the path of all of the data files in a given directory that match a given glob pattern. Parameters ---------- datadir : str Name/location of the desired data files. One of the following: * The name of a directory included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data'`` to get the files in ``astropy/pkgname/data``. * Remote URLs are not currently supported. package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. pattern : str, optional A UNIX-style filename glob pattern to match files. See the `glob` module in the standard library for more information. By default, matches all files. Returns ------- filenames : iterator of str Paths on the local filesystem in *datadir* matching *pattern*. Examples -------- This will retrieve the contents of the data file for the `astropy.wcs` tests:: >>> from astropy.utils.data import get_pkg_data_filenames >>> for fn in get_pkg_data_filenames('data/maps', 'astropy.wcs.tests', ... '*.hdr'): ... with open(fn) as f: ... fcontents = f.read() ... """ path = get_pkg_data_path(datadir, package=package) if os.path.isfile(path): raise OSError( "Tried to access a data directory that's actually a package data file" ) elif os.path.isdir(path): for filename in os.listdir(path): if fnmatch.fnmatch(filename, pattern): yield os.path.join(path, filename) else: raise OSError("Path not found") def get_pkg_data_fileobjs(datadir, package=None, pattern="*", encoding=None): """ Returns readable file objects for all of the data files in a given directory that match a given glob pattern. Parameters ---------- datadir : str Name/location of the desired data files. One of the following: * The name of a directory included in the source distribution. The path is relative to the module calling this function. For example, if calling from ``astropy.pkname``, use ``'data'`` to get the files in ``astropy/pkgname/data`` * Remote URLs are not currently supported package : str, optional If specified, look for a file relative to the given package, rather than the default of looking relative to the calling module's package. pattern : str, optional A UNIX-style filename glob pattern to match files. See the `glob` module in the standard library for more information. By default, matches all files. encoding : str, optional When `None` (default), returns a file-like object with a ``read`` method that returns `str` (``unicode``) objects, using `locale.getpreferredencoding` as an encoding. This matches the default behavior of the built-in `open` when no ``mode`` argument is provided. When ``'binary'``, returns a file-like object where its ``read`` method returns `bytes` objects. When another string, it is the name of an encoding, and the file-like object's ``read`` method will return `str` (``unicode``) objects, decoded from binary using the given encoding. Returns ------- fileobjs : iterator of file object File objects for each of the files on the local filesystem in *datadir* matching *pattern*. 
Examples -------- This will retrieve the contents of the data file for the `astropy.wcs` tests:: >>> from astropy.utils.data import get_pkg_data_filenames >>> for fd in get_pkg_data_fileobjs('data/maps', 'astropy.wcs.tests', ... '*.hdr'): ... fcontents = fd.read() ... """ for fn in get_pkg_data_filenames(datadir, package=package, pattern=pattern): with get_readable_fileobj(fn, encoding=encoding) as fd: yield fd def compute_hash(localfn): """Computes the MD5 hash for a file. The hash for a data file is used for looking up data files in a unique fashion. This is of particular use for tests; a test may require a particular version of a particular file, in which case it can be accessed via hash to get the appropriate version. Typically, if you wish to write a test that requires a particular data file, you will want to submit that file to the astropy data servers, and use e.g. ``get_pkg_data_filename('hash/34c33b3eb0d56eb9462003af249eff28')``, but with the hash for your file in place of the hash in the example. Parameters ---------- localfn : str The path to the file for which the hash should be generated. Returns ------- hash : str The hex digest of the cryptographic hash for the contents of the ``localfn`` file. """ with open(localfn, "rb") as f: h = hashlib.md5() block = f.read(conf.compute_hash_block_size) while block: h.update(block) block = f.read(conf.compute_hash_block_size) return h.hexdigest() def get_pkg_data_path(*path, package=None): """Get path from source-included data directories. Parameters ---------- *path : str Name/location of the desired data file/directory. May be a tuple of strings for ``os.path`` joining. package : str or None, optional, keyword-only If specified, look for a file relative to the given package, rather than the calling module's package. Returns ------- path : str Name/location of the desired data file/directory. Raises ------ ImportError Given package or module is not importable. RuntimeError If the local data file is outside of the package's tree. """ if package is None: module = find_current_module(1, finddiff=["astropy.utils.data", "contextlib"]) if module is None: # not called from inside an astropy package. So just pass name # through return os.path.join(*path) if not hasattr(module, "__package__") or not module.__package__: # The __package__ attribute may be missing or set to None; see # PEP-366, also astropy issue #1256 if "." in module.__name__: package = module.__name__.rpartition(".")[0] else: package = module.__name__ else: package = module.__package__ else: # package errors if it isn't a str # so there is no need for checks in the containing if/else module = resolve_name(package) # module path within package module_path = os.path.dirname(module.__file__) full_path = os.path.join(module_path, *path) # Check that file is inside tree. rootpkgname = package.partition(".")[0] rootpkg = resolve_name(rootpkgname) root_dir = os.path.dirname(rootpkg.__file__) if not _is_inside(full_path, root_dir): raise RuntimeError( f"attempted to get a local data file outside of the {rootpkgname} tree." ) return full_path def _find_hash_fn(hexdigest, pkgname="astropy"): """ Looks for a local file by hash - returns file name if found and a valid file, otherwise returns None. """ for v in cache_contents(pkgname=pkgname).values(): if compute_hash(v) == hexdigest: return v return None def get_free_space_in_dir(path, unit=False): """ Given a path to a directory, returns the amount of free space on that filesystem. Parameters ---------- path : str The path to a directory. 
unit : bool or `~astropy.units.Unit` Return the amount of free space as Quantity in the given unit, if provided. Default is `False` for backward-compatibility. Returns ------- free_space : int or `~astropy.units.Quantity` The amount of free space on the partition that the directory is on. If ``unit=False``, it is returned as plain integer (in bytes). """ if not os.path.isdir(path): raise OSError( "Can only determine free space associated with directories, not files." ) # Actually you can on Linux but I want to avoid code that fails # on Windows only. free_space = shutil.disk_usage(path).free if unit: from astropy import units as u # TODO: Automatically determine best prefix to use. if unit is True: unit = u.byte free_space = u.Quantity(free_space, u.byte).to(unit) return free_space def check_free_space_in_dir(path, size): """ Determines if a given directory has enough space to hold a file of a given size. Parameters ---------- path : str The path to a directory. size : int or `~astropy.units.Quantity` A proposed filesize. If not a Quantity, assume it is in bytes. Raises ------ OSError There is not enough room on the filesystem. """ space = get_free_space_in_dir(path, unit=getattr(size, "unit", False)) if space < size: from astropy.utils.console import human_file_size raise OSError( f"Not enough free space in {path} " f"to download a {human_file_size(size)} file, " f"only {human_file_size(space)} left" ) class _ftptlswrapper(urllib.request.ftpwrapper): def init(self): self.busy = 0 self.ftp = ftplib.FTP_TLS() self.ftp.connect(self.host, self.port, self.timeout) self.ftp.login(self.user, self.passwd) self.ftp.prot_p() _target = "/".join(self.dirs) self.ftp.cwd(_target) class _FTPTLSHandler(urllib.request.FTPHandler): def connect_ftp(self, user, passwd, host, port, dirs, timeout): return _ftptlswrapper(user, passwd, host, port, dirs, timeout, persistent=False) @functools.lru_cache def _build_urlopener(ftp_tls=False, ssl_context=None, allow_insecure=False): """ Helper for building a `urllib.request.build_opener` which handles TLS/SSL. 
""" # Import ssl here to avoid import failure when running in pyodide/Emscripten import ssl ssl_context = dict(it for it in ssl_context) if ssl_context else {} cert_chain = {} if "certfile" in ssl_context: cert_chain.update( { "certfile": ssl_context.pop("certfile"), "keyfile": ssl_context.pop("keyfile", None), "password": ssl_context.pop("password", None), } ) elif "password" in ssl_context or "keyfile" in ssl_context: raise ValueError( "passing 'keyfile' or 'password' in the ssl_context argument " "requires passing 'certfile' as well" ) if "cafile" not in ssl_context and certifi is not None: ssl_context["cafile"] = certifi.where() ssl_context = ssl.create_default_context(**ssl_context) if allow_insecure: ssl_context.check_hostname = False ssl_context.verify_mode = ssl.CERT_NONE if cert_chain: ssl_context.load_cert_chain(**cert_chain) https_handler = urllib.request.HTTPSHandler(context=ssl_context) if ftp_tls: urlopener = urllib.request.build_opener(_FTPTLSHandler(), https_handler) else: urlopener = urllib.request.build_opener(https_handler) return urlopener def _try_url_open( source_url, timeout=None, http_headers=None, ftp_tls=False, ssl_context=None, allow_insecure=False, ): """Helper for opening a URL while handling TLS/SSL verification issues.""" # Import ssl here to avoid import failure when running in pyodide/Emscripten import ssl # Always try first with a secure connection # _build_urlopener uses lru_cache, so the ssl_context argument must be # converted to a hashshable type (a set of 2-tuples) ssl_context = frozenset(ssl_context.items() if ssl_context else []) urlopener = _build_urlopener( ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=False ) req = urllib.request.Request(source_url, headers=http_headers) try: return urlopener.open(req, timeout=timeout) except urllib.error.URLError as exc: reason = exc.reason if ( isinstance(reason, ssl.SSLError) and reason.reason == "CERTIFICATE_VERIFY_FAILED" ): msg = ( f"Verification of TLS/SSL certificate at {source_url} " "failed: this can mean either the server is " "misconfigured or your local root CA certificates are " "out-of-date; in the latter case this can usually be " 'addressed by installing the Python package "certifi" ' "(see the documentation for astropy.utils.data.download_url)" ) if not allow_insecure: msg += ( " or in both cases you can work around this by " "passing allow_insecure=True, but only if you " "understand the implications; the original error " f"was: {reason}" ) raise urllib.error.URLError(msg) else: msg += ". Re-trying with allow_insecure=True." 
warn(msg, AstropyWarning) # Try again with a new urlopener allowing insecure connections urlopener = _build_urlopener( ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=True ) return urlopener.open(req, timeout=timeout) raise def _download_file_from_source( source_url, show_progress=True, timeout=None, remote_url=None, cache=False, pkgname="astropy", http_headers=None, ftp_tls=None, ssl_context=None, allow_insecure=False, ): from astropy.utils.console import ProgressBarOrSpinner if not conf.allow_internet: raise urllib.error.URLError( f"URL {remote_url} was supposed to be downloaded but " f"allow_internet is {conf.allow_internet}; " "if this is unexpected check the astropy.cfg file for the option " "allow_internet" ) if remote_url is None: remote_url = source_url if http_headers is None: http_headers = {} if ftp_tls is None and urllib.parse.urlparse(remote_url).scheme == "ftp": try: return _download_file_from_source( source_url, show_progress=show_progress, timeout=timeout, remote_url=remote_url, cache=cache, pkgname=pkgname, http_headers=http_headers, ftp_tls=False, ) except urllib.error.URLError as e: # e.reason might not be a string, e.g. socket.gaierror # URLError changed to report original exception in Python 3.10, 3.11 (bpo-43564) if str(e.reason).lstrip("ftp error: ").startswith(("error_perm", "5")): ftp_tls = True else: raise with _try_url_open( source_url, timeout=timeout, http_headers=http_headers, ftp_tls=ftp_tls, ssl_context=ssl_context, allow_insecure=allow_insecure, ) as remote: info = remote.info() try: size = int(info["Content-Length"]) except (KeyError, ValueError, TypeError): size = None if size is not None: check_free_space_in_dir(gettempdir(), size) if cache: dldir = _get_download_cache_loc(pkgname) check_free_space_in_dir(dldir, size) # If a user has overridden sys.stdout it might not have the # isatty method, in that case assume it's not a tty is_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty() if show_progress and is_tty: progress_stream = sys.stdout else: progress_stream = io.StringIO() if source_url == remote_url: dlmsg = f"Downloading {remote_url}" else: dlmsg = f"Downloading {remote_url} from {source_url}" with ProgressBarOrSpinner(size, dlmsg, file=progress_stream) as p: with NamedTemporaryFile( prefix=f"astropy-download-{os.getpid()}-", delete=False ) as f: try: bytes_read = 0 block = remote.read(conf.download_block_size) while block: f.write(block) bytes_read += len(block) p.update(bytes_read) block = remote.read(conf.download_block_size) if size is not None and bytes_read > size: raise urllib.error.URLError( f"File was supposed to be {size} bytes but " f"server provides more, at least {bytes_read} " "bytes. Download failed." ) if size is not None and bytes_read < size: raise urllib.error.ContentTooShortError( f"File was supposed to be {size} bytes but we " f"only got {bytes_read} bytes. Download failed.", content=None, ) except BaseException: if os.path.exists(f.name): try: os.remove(f.name) except OSError: pass raise return f.name def download_file( remote_url, cache=False, show_progress=True, timeout=None, sources=None, pkgname="astropy", http_headers=None, ssl_context=None, allow_insecure=False, ): """Downloads a URL and optionally caches the result. It returns the filename of a file containing the URL's contents. If ``cache=True`` and the file is present in the cache, just returns the filename; if the file had to be downloaded, add it to the cache. If ``cache="update"`` always download and add it to the cache. 
The cache is effectively a dictionary mapping URLs to files; by default the file contains the contents of the URL that is its key, but in practice these can be obtained from a mirror (using ``sources``) or imported from the local filesystem (using `~import_file_to_cache` or `~import_download_cache`). Regardless, each file is regarded as representing the contents of a particular URL, and this URL should be used to look them up or otherwise manipulate them. The files in the cache directory are named according to a cryptographic hash of their URLs (currently MD5, so hackers can cause collisions). The modification times on these files normally indicate when they were last downloaded from the Internet. Parameters ---------- remote_url : str The URL of the file to download cache : bool or "update", optional Whether to cache the contents of remote URLs. If "update", always download the remote URL in case there is a new version and store the result in the cache. show_progress : bool, optional Whether to display a progress bar during the download (default is `True`). Regardless of this setting, the progress bar is only displayed when outputting to a terminal. timeout : float, optional Timeout for remote requests in seconds (default is the configurable `astropy.utils.data.Conf.remote_timeout`). sources : list of str, optional If provided, a list of URLs to try to obtain the file from. The result will be stored under the original URL. The original URL will *not* be tried unless it is in this list; this is to prevent long waits for a primary server that is known to be inaccessible at the moment. If an empty list is passed, then ``download_file`` will not attempt to connect to the Internet, that is, if the file is not in the cache a KeyError will be raised. pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. http_headers : dict or None HTTP request headers to pass into ``urlopen`` if needed. (These headers are ignored if the protocol for the ``name_or_obj``/``sources`` entry is not a remote HTTP URL.) In the default case (None), the headers are ``User-Agent: some_value`` and ``Accept: */*``, where ``some_value`` is set by ``astropy.utils.data.conf.default_http_user_agent``. ssl_context : dict, optional Keyword arguments to pass to `ssl.create_default_context` when downloading from HTTPS or TLS+FTP sources. This can be used provide alternative paths to root CA certificates. Additionally, if the key ``'certfile'`` and optionally ``'keyfile'`` and ``'password'`` are included, they are passed to `ssl.SSLContext.load_cert_chain`. This can be used for performing SSL/TLS client certificate authentication for servers that require it. allow_insecure : bool, optional Allow downloading files over a TLS/SSL connection even when the server certificate verification failed. When set to `True` the potentially insecure download is allowed to proceed, but an `~astropy.utils.exceptions.AstropyWarning` is issued. If you are frequently getting certificate verification warnings, consider installing or upgrading `certifi`_ package, which provides frequently updated certificates for common root CAs (i.e., a set similar to those used by web browsers). If installed, Astropy will use it automatically. .. _certifi: https://pypi.org/project/certifi/ Returns ------- local_path : str Returns the local path that the file was download to. Raises ------ urllib.error.URLError Whenever there's a problem getting the remote file. 
KeyError When a file was requested from the cache but is missing and no sources were provided to obtain it from the Internet. Notes ----- Because this function returns a filename, another process could run `clear_download_cache` before you actually open the file, leaving you with a filename that no longer points to a usable file. """ if timeout is None: timeout = conf.remote_timeout if sources is None: sources = [remote_url] if http_headers is None: http_headers = {"User-Agent": conf.default_http_user_agent, "Accept": "*/*"} missing_cache = "" url_key = remote_url if cache: try: dldir = _get_download_cache_loc(pkgname) except OSError as e: cache = False missing_cache = ( f"Cache directory cannot be read or created ({e}), " "providing data in temporary file instead." ) else: if cache == "update": pass elif isinstance(cache, str): raise ValueError( f"Cache value '{cache}' was requested but " "'update' is the only recognized string; " "otherwise use a boolean" ) else: filename = os.path.join(dldir, _url_to_dirname(url_key), "contents") if os.path.exists(filename): return os.path.abspath(filename) errors = {} for source_url in sources: try: f_name = _download_file_from_source( source_url, timeout=timeout, show_progress=show_progress, cache=cache, remote_url=remote_url, pkgname=pkgname, http_headers=http_headers, ssl_context=ssl_context, allow_insecure=allow_insecure, ) # Success! break except urllib.error.URLError as e: # errno 8 is from SSL "EOF occurred in violation of protocol" if ( hasattr(e, "reason") and hasattr(e.reason, "errno") and e.reason.errno == 8 ): e.reason.strerror = f"{e.reason.strerror}. requested URL: {remote_url}" e.reason.args = (e.reason.errno, e.reason.strerror) errors[source_url] = e else: # No success if not sources: raise KeyError( f"No sources listed and file {remote_url} not in cache! " "Please include primary URL in sources if you want it to be " "included as a valid source." ) elif len(sources) == 1: raise errors[sources[0]] else: raise urllib.error.URLError( f"Unable to open any source! Exceptions were {errors}" ) from errors[sources[0]] if cache: try: return import_file_to_cache( url_key, f_name, remove_original=True, replace=(cache == "update"), pkgname=pkgname, ) except PermissionError as e: # Cache is readonly, we can't update it missing_cache = ( f"Cache directory appears to be read-only ({e}), unable to import " f"downloaded file, providing data in temporary file {f_name} " "instead." ) # FIXME: other kinds of cache problem can occur? if missing_cache: warn(CacheMissingWarning(missing_cache, f_name)) if conf.delete_temporary_downloads_at_exit: global _tempfilestodel _tempfilestodel.append(f_name) return os.path.abspath(f_name) def is_url_in_cache(url_key, pkgname="astropy"): """Check if a download for ``url_key`` is in the cache. The provided ``url_key`` will be the name used in the cache. The contents may have been downloaded from this URL or from a mirror or they may have been provided by the user. See `~download_file` for details. Parameters ---------- url_key : str The URL retrieved pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. Returns ------- in_cache : bool `True` if a download for ``url_key`` is in the cache, `False` if not or if the cache does not exist at all. 
See Also -------- cache_contents : obtain a dictionary listing everything in the cache """ try: dldir = _get_download_cache_loc(pkgname) except OSError: return False filename = os.path.join(dldir, _url_to_dirname(url_key), "contents") return os.path.exists(filename) def cache_total_size(pkgname="astropy"): """Return the total size in bytes of all files in the cache.""" size = 0 dldir = _get_download_cache_loc(pkgname=pkgname) for root, dirs, files in os.walk(dldir): size += sum(os.path.getsize(os.path.join(root, name)) for name in files) return size def _do_download_files_in_parallel(kwargs): with astropy.config.paths.set_temp_config(kwargs.pop("temp_config")): with astropy.config.paths.set_temp_cache(kwargs.pop("temp_cache")): return download_file(**kwargs) def download_files_in_parallel( urls, cache="update", show_progress=True, timeout=None, sources=None, multiprocessing_start_method=None, pkgname="astropy", ): """Download multiple files in parallel from the given URLs. Blocks until all files have downloaded. The result is a list of local file paths corresponding to the given urls. The results will be stored in the cache under the values in ``urls`` even if they are obtained from some other location via ``sources``. See `~download_file` for details. Parameters ---------- urls : list of str The URLs to retrieve. cache : bool or "update", optional Whether to use the cache (default is `True`). If "update", always download the remote URLs to see if new data is available and store the result in cache. .. versionchanged:: 4.0 The default was changed to ``"update"`` and setting it to ``False`` will print a Warning and set it to ``"update"`` again, because the function will not work properly without cache. Using ``True`` will work as expected. .. versionchanged:: 3.0 The default was changed to ``True`` and setting it to ``False`` will print a Warning and set it to ``True`` again, because the function will not work properly without cache. show_progress : bool, optional Whether to display a progress bar during the download (default is `True`) timeout : float, optional Timeout for each individual requests in seconds (default is the configurable `astropy.utils.data.Conf.remote_timeout`). sources : dict, optional If provided, for each URL a list of URLs to try to obtain the file from. The result will be stored under the original URL. For any URL in this dictionary, the original URL will *not* be tried unless it is in this list; this is to prevent long waits for a primary server that is known to be inaccessible at the moment. multiprocessing_start_method : str, optional Useful primarily for testing; if in doubt leave it as the default. When using multiprocessing, certain anomalies occur when starting processes with the "spawn" method (the only option on Windows); other anomalies occur with the "fork" method (the default on Linux). pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. Returns ------- paths : list of str The local file paths corresponding to the downloaded URLs. Notes ----- If a URL is unreachable, the downloading will grind to a halt and the exception will propagate upward, but an unpredictable number of files will have been successfully downloaded and will remain in the cache. 
""" from .console import ProgressBar if timeout is None: timeout = conf.remote_timeout if sources is None: sources = {} if not cache: # See issue #6662, on windows won't work because the files are removed # again before they can be used. On *NIX systems it will behave as if # cache was set to True because multiprocessing cannot insert the items # in the list of to-be-removed files. This could be fixed, but really, # just use the cache, with update_cache if appropriate. warn( "Disabling the cache does not work because of multiprocessing, " 'it will be set to ``"update"``. You may need to manually remove ' "the cached files with clear_download_cache() afterwards.", AstropyWarning, ) cache = "update" if show_progress: progress = sys.stdout else: progress = io.BytesIO() # Combine duplicate URLs combined_urls = list(set(urls)) combined_paths = ProgressBar.map( _do_download_files_in_parallel, [ dict( remote_url=u, cache=cache, show_progress=False, timeout=timeout, sources=sources.get(u, None), pkgname=pkgname, temp_cache=astropy.config.paths.set_temp_cache._temp_path, temp_config=astropy.config.paths.set_temp_config._temp_path, ) for u in combined_urls ], file=progress, multiprocess=True, multiprocessing_start_method=multiprocessing_start_method, ) paths = [] for url in urls: paths.append(combined_paths[combined_urls.index(url)]) return paths # This is used by download_file and _deltemps to determine the files to delete # when the interpreter exits _tempfilestodel = [] @atexit.register def _deltemps(): global _tempfilestodel if _tempfilestodel is not None: while len(_tempfilestodel) > 0: fn = _tempfilestodel.pop() if os.path.isfile(fn): try: os.remove(fn) except OSError: # oh well we tried # could be held open by some process, on Windows pass elif os.path.isdir(fn): try: shutil.rmtree(fn) except OSError: # couldn't get rid of it, sorry # could be held open by some process, on Windows pass def clear_download_cache(hashorurl=None, pkgname="astropy"): """Clears the data file cache by deleting the local file(s). If a URL is provided, it will be the name used in the cache. The contents may have been downloaded from this URL or from a mirror or they may have been provided by the user. See `~download_file` for details. For the purposes of this function, a file can also be identified by a hash of its contents or by the filename under which the data is stored (as returned by `~download_file`, for example). Parameters ---------- hashorurl : str or None If None, the whole cache is cleared. Otherwise, specify a hash for the cached file that is supposed to be deleted, the full path to a file in the cache that should be deleted, or a URL that should be removed from the cache if present. pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. 
""" try: dldir = _get_download_cache_loc(pkgname) except OSError as e: # Problem arose when trying to open the cache # Just a warning, though msg = "Not clearing data cache - cache inaccessible due to " estr = "" if len(e.args) < 1 else (": " + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) return try: if hashorurl is None: # Optional: delete old incompatible caches too _rmtree(dldir) elif _is_url(hashorurl): filepath = os.path.join(dldir, _url_to_dirname(hashorurl)) _rmtree(filepath) else: # Not a URL, it should be either a filename or a hash filepath = os.path.join(dldir, hashorurl) rp = os.path.relpath(filepath, dldir) if rp.startswith(".."): raise RuntimeError( "attempted to use clear_download_cache on the path " f"{filepath} outside the data cache directory {dldir}" ) d, f = os.path.split(rp) if d and f in ["contents", "url"]: # It's a filename not the hash of a URL # so we want to zap the directory containing the # files "url" and "contents" filepath = os.path.join(dldir, d) if os.path.exists(filepath): _rmtree(filepath) elif len(hashorurl) == 2 * hashlib.md5().digest_size and re.match( r"[0-9a-f]+", hashorurl ): # It's the hash of some file contents, we have to find the right file filename = _find_hash_fn(hashorurl) if filename is not None: clear_download_cache(filename) except OSError as e: msg = "Not clearing data from cache - problem arose " estr = "" if len(e.args) < 1 else (": " + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) def _get_download_cache_loc(pkgname="astropy"): """Finds the path to the cache directory and makes them if they don't exist. Parameters ---------- pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. Returns ------- datadir : str The path to the data cache directory. """ try: datadir = os.path.join( astropy.config.paths.get_cache_dir(pkgname), "download", "url" ) if not os.path.exists(datadir): try: os.makedirs(datadir) except OSError: if not os.path.exists(datadir): raise elif not os.path.isdir(datadir): raise OSError(f"Data cache directory {datadir} is not a directory") return datadir except OSError as e: msg = "Remote data cache could not be accessed due to " estr = "" if len(e.args) < 1 else (": " + str(e)) warn(CacheMissingWarning(msg + e.__class__.__name__ + estr)) raise def _url_to_dirname(url): if not _is_url(url): raise ValueError(f"Malformed URL: '{url}'") # Make domain names case-insensitive # Also makes the http:// case-insensitive urlobj = list(urllib.parse.urlsplit(url)) urlobj[1] = urlobj[1].lower() if urlobj[0].lower() in ["http", "https"] and urlobj[1] and urlobj[2] == "": urlobj[2] = "/" url_c = urllib.parse.urlunsplit(urlobj) return hashlib.md5(url_c.encode("utf-8")).hexdigest() class ReadOnlyDict(dict): def __setitem__(self, key, value): raise TypeError("This object is read-only.") _NOTHING = ReadOnlyDict({}) class CacheDamaged(ValueError): """Record the URL or file that was a problem. Using clear_download_cache on the .bad_file or .bad_url attribute, whichever is not None, should resolve this particular problem. """ def __init__(self, *args, bad_urls=None, bad_files=None, **kwargs): super().__init__(*args, **kwargs) self.bad_urls = bad_urls if bad_urls is not None else [] self.bad_files = bad_files if bad_files is not None else [] def check_download_cache(pkgname="astropy"): """Do a consistency check on the cache. .. note:: Since v5.0, this function no longer returns anything. 
Because the cache is shared by all versions of ``astropy`` in all virtualenvs run by your user, possibly concurrently, it could accumulate problems. This could lead to hard-to-debug problems or wasted space. This function detects a number of incorrect conditions, including nonexistent files that are indexed, files that are indexed but in the wrong place, and, if you request it, files whose content does not match the hash that is indexed. This function also returns a list of non-indexed files. A few will be associated with the shelve object; their exact names depend on the backend used but will probably be based on ``urlmap``. The presence of other files probably indicates that something has gone wrong and inaccessible files have accumulated in the cache. These can be removed with :func:`clear_download_cache`, either passing the filename returned here, or with no arguments to empty the entire cache and return it to a reasonable, if empty, state. Parameters ---------- pkgname : str, optional The package name to use to locate the download cache, i.e., for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. Raises ------ `~astropy.utils.data.CacheDamaged` To indicate a problem with the cache contents; the exception contains a ``.bad_files`` attribute containing a set of filenames to allow the user to use :func:`clear_download_cache` to remove the offending items. OSError, RuntimeError To indicate some problem with the cache structure. This may need a full :func:`clear_download_cache` to resolve, or may indicate some kind of misconfiguration. """ bad_files = set() messages = set() dldir = _get_download_cache_loc(pkgname=pkgname) with os.scandir(dldir) as it: for entry in it: f = os.path.abspath(os.path.join(dldir, entry.name)) if entry.name.startswith("rmtree-"): if f not in _tempfilestodel: bad_files.add(f) messages.add(f"Cache entry {entry.name} not scheduled for deletion") elif entry.is_dir(): for sf in os.listdir(f): if sf in ["url", "contents"]: continue sf = os.path.join(f, sf) bad_files.add(sf) messages.add(f"Unexpected file f{sf}") urlf = os.path.join(f, "url") url = None if not os.path.isfile(urlf): bad_files.add(urlf) messages.add(f"Problem with URL file f{urlf}") else: url = get_file_contents(urlf, encoding="utf-8") if not _is_url(url): bad_files.add(f) messages.add(f"Malformed URL: {url}") else: hashname = _url_to_dirname(url) if entry.name != hashname: bad_files.add(f) messages.add( f"URL hashes to {hashname} but is stored in" f" {entry.name}" ) if not os.path.isfile(os.path.join(f, "contents")): bad_files.add(f) if url is None: messages.add(f"Hash {entry.name} is missing contents") else: messages.add( f"URL {url} with hash {entry.name} is missing contents" ) else: bad_files.add(f) messages.add(f"Left-over non-directory {f} in cache") if bad_files: raise CacheDamaged("\n".join(messages), bad_files=bad_files) @contextlib.contextmanager def _SafeTemporaryDirectory(suffix=None, prefix=None, dir=None): """Temporary directory context manager. This will not raise an exception if the temporary directory goes away before it's supposed to be deleted. Specifically, what is deleted will be the directory *name* produced; if no such directory exists, no exception will be raised. It would be safer to delete it only if it's really the same directory - checked by file descriptor - and if it's still called the same thing. But that opens a platform-specific can of worms. 
It would also be more robust to use ExitStack and TemporaryDirectory, which is more aggressive about removing readonly things. """ d = mkdtemp(suffix=suffix, prefix=prefix, dir=dir) try: yield d finally: try: shutil.rmtree(d) except OSError: pass def _rmtree(path, replace=None): """More-atomic rmtree. Ignores missing directory.""" with TemporaryDirectory( prefix="rmtree-", dir=os.path.dirname(os.path.abspath(path)) ) as d: try: os.rename(path, os.path.join(d, "to-zap")) except FileNotFoundError: pass except PermissionError: warn( CacheMissingWarning( f"Unable to remove directory {path} because a file in it " "is in use and you are on Windows", path, ) ) raise except OSError as e: if e.errno == errno.EXDEV: warn(e.strerror, AstropyWarning) shutil.move(path, os.path.join(d, "to-zap")) else: raise if replace is not None: try: os.rename(replace, path) except FileExistsError: # already there, fine pass except OSError as e: if e.errno == errno.ENOTEMPTY: # already there, fine pass elif e.errno == errno.EXDEV: warn(e.strerror, AstropyWarning) shutil.move(replace, path) else: raise def import_file_to_cache( url_key, filename, remove_original=False, pkgname="astropy", *, replace=True ): """Import the on-disk file specified by filename to the cache. The provided ``url_key`` will be the name used in the cache. The file should contain the contents of this URL, at least notionally (the URL may be temporarily or permanently unavailable). It is using ``url_key`` that users will request these contents from the cache. See :func:`download_file` for details. If ``url_key`` already exists in the cache, it will be updated to point to these imported contents, and its old contents will be deleted from the cache. Parameters ---------- url_key : str The key to index the file under. This should probably be the URL where the file was located, though if you obtained it from a mirror you should use the URL of the primary location. filename : str The file whose contents you want to import. remove_original : bool Whether to remove the original file (``filename``) once import is complete. pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. replace : boolean, optional Whether or not to replace an existing object in the cache, if one exists. If replacement is not requested but the object exists, silently pass. """ cache_dir = _get_download_cache_loc(pkgname=pkgname) cache_dirname = _url_to_dirname(url_key) local_dirname = os.path.join(cache_dir, cache_dirname) local_filename = os.path.join(local_dirname, "contents") with _SafeTemporaryDirectory(prefix="temp_dir", dir=cache_dir) as temp_dir: temp_filename = os.path.join(temp_dir, "contents") # Make sure we're on the same filesystem # This will raise an exception if the url_key doesn't turn into a valid filename shutil.copy(filename, temp_filename) with open(os.path.join(temp_dir, "url"), "w", encoding="utf-8") as f: f.write(url_key) if replace: _rmtree(local_dirname, replace=temp_dir) else: try: os.rename(temp_dir, local_dirname) except FileExistsError: # already there, fine pass except OSError as e: if e.errno == errno.ENOTEMPTY: # already there, fine pass else: raise if remove_original: os.remove(filename) return os.path.abspath(local_filename) def get_cached_urls(pkgname="astropy"): """ Get the list of URLs in the cache. Especially useful for looking up what files are stored in your cache when you don't have internet access. 
The listed URLs are the keys programs should use to access the file contents, but those contents may have actually been obtained from a mirror. See `~download_file` for details. Parameters ---------- pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. Returns ------- cached_urls : list List of cached URLs. See Also -------- cache_contents : obtain a dictionary listing everything in the cache """ return sorted(cache_contents(pkgname=pkgname).keys()) def cache_contents(pkgname="astropy"): """Obtain a dict mapping cached URLs to filenames. This dictionary is a read-only snapshot of the state of the cache when this function was called. If other processes are actively working with the cache, it is possible for them to delete files that are listed in this dictionary. Use with some caution if you are working on a system that is busy with many running astropy processes, although the same issues apply to most functions in this module. """ r = {} try: dldir = _get_download_cache_loc(pkgname=pkgname) except OSError: return _NOTHING with os.scandir(dldir) as it: for entry in it: if entry.is_dir: url = get_file_contents( os.path.join(dldir, entry.name, "url"), encoding="utf-8" ) r[url] = os.path.abspath(os.path.join(dldir, entry.name, "contents")) return ReadOnlyDict(r) def export_download_cache( filename_or_obj, urls=None, overwrite=False, pkgname="astropy" ): """Exports the cache contents as a ZIP file. Parameters ---------- filename_or_obj : str or file-like Where to put the created ZIP file. Must be something the zipfile module can write to. urls : iterable of str or None The URLs to include in the exported cache. The default is all URLs currently in the cache. If a URL is included in this list but is not currently in the cache, a KeyError will be raised. To ensure that all are in the cache use `~download_file` or `~download_files_in_parallel`. overwrite : bool, optional If filename_or_obj is a filename that exists, it will only be overwritten if this is True. pkgname : `str`, optional The package name to use to locate the download cache. i.e. for ``pkgname='astropy'`` the default cache location is ``~/.astropy/cache``. See Also -------- import_download_cache : import the contents of such a ZIP file import_file_to_cache : import a single file directly """ if urls is None: urls = get_cached_urls(pkgname) with zipfile.ZipFile(filename_or_obj, "w" if overwrite else "x") as z: for u in urls: fn = download_file(u, cache=True, sources=[], pkgname=pkgname) # Do not use os.path.join because ZIP files want # "/" on all platforms z_fn = urllib.parse.quote(u, safe="") z.write(fn, z_fn) def import_download_cache( filename_or_obj, urls=None, update_cache=False, pkgname="astropy" ): """Imports the contents of a ZIP file into the cache. Each member of the ZIP file should be named by a quoted version of the URL whose contents it stores. These names are decoded with :func:`~urllib.parse.unquote`. Parameters ---------- filename_or_obj : str or file-like Where the stored ZIP file is. Must be something the :mod:`~zipfile` module can read from. urls : set of str or list of str or None The URLs to import from the ZIP file. The default is all URLs in the file. update_cache : bool, optional If True, any entry in the ZIP file will overwrite the value in the cache; if False, leave untouched any entry already in the cache. pkgname : `str`, optional The package name to use to locate the download cache. i.e. 
    for ``pkgname='astropy'`` the default cache location is
    ``~/.astropy/cache``.

    See Also
    --------
    export_download_cache : export the contents of the cache to such a ZIP file
    import_file_to_cache : import a single file directly
    """
    with zipfile.ZipFile(filename_or_obj, "r") as z, TemporaryDirectory() as d:
        for i, zf in enumerate(z.infolist()):
            url = urllib.parse.unquote(zf.filename)
            # FIXME(aarchiba): do we want some kind of validation on this URL?
            # urllib.parse might do something sensible...but what URLs might
            # they have?
            # is_url in this file is probably a good check, not just here
            # but throughout this file.
            if urls is not None and url not in urls:
                continue
            if not update_cache and is_url_in_cache(url, pkgname=pkgname):
                continue
            f_temp_name = os.path.join(d, str(i))
            with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
                block = f_zip.read(conf.download_block_size)
                while block:
                    f_temp.write(block)
                    block = f_zip.read(conf.download_block_size)
            import_file_to_cache(
                url, f_temp_name, remove_original=True, pkgname=pkgname
            )
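

# Illustrative sketch of a cache export/import round trip using the helpers
# defined above.  The URL is a placeholder and network access is assumed; the
# block is guarded so that importing this module never performs any downloads.
if __name__ == "__main__":  # pragma: no cover
    example_url = "https://example.com/data.fits"  # hypothetical URL
    download_file(example_url, cache=True)
    # Snapshot the cache to a ZIP file, wipe it, then restore it
    export_download_cache("cache_snapshot.zip", overwrite=True)
    clear_download_cache()
    import_download_cache("cache_snapshot.zip", update_cache=True)
    assert is_url_in_cache(example_url)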
a64c62a77dd8fd54912d7e0eae36e212bf7380b2f74373fe44903cee77c6975c
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ A "grab bag" of relatively small general-purpose utilities that don't have a clear module/package to live in. """ import abc import contextlib import difflib import inspect import json import locale import os import re import signal import sys import threading import traceback import unicodedata from collections import OrderedDict, defaultdict from contextlib import contextmanager from astropy.utils.decorators import deprecated __all__ = [ "isiterable", "silence", "format_exception", "NumpyRNGContext", "find_api_page", "is_path_hidden", "walk_skip_hidden", "JsonCustomEncoder", "indent", "dtype_bytes_or_chars", "OrderedDescriptor", "OrderedDescriptorContainer", ] # Because they are deprecated. __doctest_skip__ = ["OrderedDescriptor", "OrderedDescriptorContainer"] NOT_OVERWRITING_MSG = ( "File {} already exists. If you mean to replace it " 'then use the argument "overwrite=True".' ) # A useful regex for tests. _NOT_OVERWRITING_MSG_MATCH = ( r"File .* already exists\. If you mean to " r"replace it then use the argument " r'"overwrite=True"\.' ) def isiterable(obj): """Returns `True` if the given object is iterable.""" try: iter(obj) return True except TypeError: return False def indent(s, shift=1, width=4): """Indent a block of text. The indentation is applied to each line.""" indented = "\n".join(" " * (width * shift) + l if l else "" for l in s.splitlines()) if s[-1] == "\n": indented += "\n" return indented class _DummyFile: """A noop writeable object.""" def write(self, s): pass @contextlib.contextmanager def silence(): """A context manager that silences sys.stdout and sys.stderr.""" old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = _DummyFile() sys.stderr = _DummyFile() yield sys.stdout = old_stdout sys.stderr = old_stderr def format_exception(msg, *args, **kwargs): """Fill in information about the exception that occurred. Given an exception message string, uses new-style formatting arguments ``{filename}``, ``{lineno}``, ``{func}`` and/or ``{text}`` to fill in information about the exception that occurred. For example: try: 1/0 except: raise ZeroDivisionError( format_except('A divide by zero occurred in {filename} at ' 'line {lineno} of function {func}.')) Any additional positional or keyword arguments passed to this function are also used to format the message. .. note:: This uses `sys.exc_info` to gather up the information needed to fill in the formatting arguments. Since `sys.exc_info` is not carried outside a handled exception, it's not wise to use this outside of an ``except`` clause - if it is, this will substitute '<unknown>' for the 4 formatting arguments. """ tb = traceback.extract_tb(sys.exc_info()[2], limit=1) if len(tb) > 0: filename, lineno, func, text = tb[0] else: filename = lineno = func = text = "<unknown>" return msg.format( *args, filename=filename, lineno=lineno, func=func, text=text, **kwargs ) class NumpyRNGContext: """ A context manager (for use with the ``with`` statement) that will seed the numpy random number generator (RNG) to a specific value, and then restore the RNG state back to whatever it was before. This is primarily intended for use in the astropy testing suit, but it may be useful in ensuring reproducibility of Monte Carlo simulations in a science context. 
Parameters ---------- seed : int The value to use to seed the numpy RNG Examples -------- A typical use case might be:: with NumpyRNGContext(<some seed value you pick>): from numpy import random randarr = random.randn(100) ... run your test using `randarr` ... #Any code using numpy.random at this indent level will act just as it #would have if it had been before the with statement - e.g. whatever #the default seed is. """ def __init__(self, seed): self.seed = seed def __enter__(self): from numpy import random self.startstate = random.get_state() random.seed(self.seed) def __exit__(self, exc_type, exc_value, traceback): from numpy import random random.set_state(self.startstate) def find_api_page(obj, version=None, openinbrowser=True, timeout=None): """ Determines the URL of the API page for the specified object, and optionally open that page in a web browser. .. note:: You must be connected to the internet for this to function even if ``openinbrowser`` is `False`, unless you provide a local version of the documentation to ``version`` (e.g., ``file:///path/to/docs``). Parameters ---------- obj The object to open the docs for or its fully-qualified name (as a str). version : str The doc version - either a version number like '0.1', 'dev' for the development/latest docs, or a URL to point to a specific location that should be the *base* of the documentation. Defaults to latest if you are on aren't on a release, otherwise, the version you are on. openinbrowser : bool If `True`, the `webbrowser` package will be used to open the doc page in a new web browser window. timeout : number, optional The number of seconds to wait before timing-out the query to the astropy documentation. If not given, the default python stdlib timeout will be used. Returns ------- url : str The loaded URL Raises ------ ValueError If the documentation can't be found """ import webbrowser from zlib import decompress from astropy.utils.data import get_readable_fileobj if ( not isinstance(obj, str) and hasattr(obj, "__module__") and hasattr(obj, "__name__") ): obj = obj.__module__ + "." + obj.__name__ elif inspect.ismodule(obj): obj = obj.__name__ if version is None: from astropy import version if version.release: version = "v" + version.version else: version = "dev" if "://" in version: if version.endswith("index.html"): baseurl = version[:-10] elif version.endswith("/"): baseurl = version else: baseurl = version + "/" elif version == "dev" or version == "latest": baseurl = "http://devdocs.astropy.org/" else: baseurl = f"https://docs.astropy.org/en/{version}/" # Custom request headers; see # https://github.com/astropy/astropy/issues/8990 url = baseurl + "objects.inv" headers = {"User-Agent": f"Astropy/{version}"} with get_readable_fileobj( url, encoding="binary", remote_timeout=timeout, http_headers=headers ) as uf: oiread = uf.read() # need to first read/remove the first four lines, which have info before # the compressed section with the actual object inventory idx = -1 headerlines = [] for _ in range(4): oldidx = idx idx = oiread.index(b"\n", oldidx + 1) headerlines.append(oiread[(oldidx + 1) : idx].decode("utf-8")) # intersphinx version line, project name, and project version ivers, proj, vers, compr = headerlines if "The remainder of this file is compressed using zlib" not in compr: raise ValueError( f"The file downloaded from {baseurl}objects.inv does not seem to be" "the usual Sphinx objects.inv format. Maybe it " "has changed?" 
) compressed = oiread[(idx + 1) :] decompressed = decompress(compressed).decode("utf-8") resurl = None for l in decompressed.strip().splitlines(): ls = l.split() name = ls[0] loc = ls[3] if loc.endswith("$"): loc = loc[:-1] + name if name == obj: resurl = baseurl + loc break if resurl is None: raise ValueError(f"Could not find the docs for the object {obj}") elif openinbrowser: webbrowser.open(resurl) return resurl def signal_number_to_name(signum): """ Given an OS signal number, returns a signal name. If the signal number is unknown, returns ``'UNKNOWN'``. """ # Since these numbers and names are platform specific, we use the # builtin signal module and build a reverse mapping. signal_to_name_map = { k: v for v, k in signal.__dict__.items() if v.startswith("SIG") } return signal_to_name_map.get(signum, "UNKNOWN") if sys.platform == "win32": import ctypes def _has_hidden_attribute(filepath): """ Returns True if the given filepath has the hidden attribute on MS-Windows. Based on a post here: https://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection. """ if isinstance(filepath, bytes): filepath = filepath.decode(sys.getfilesystemencoding()) try: attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath) result = bool(attrs & 2) and attrs != -1 except AttributeError: result = False return result else: def _has_hidden_attribute(filepath): return False def is_path_hidden(filepath): """ Determines if a given file or directory is hidden. Parameters ---------- filepath : str The path to a file or directory Returns ------- hidden : bool Returns `True` if the file is hidden """ name = os.path.basename(os.path.abspath(filepath)) if isinstance(name, bytes): is_dotted = name.startswith(b".") else: is_dotted = name.startswith(".") return is_dotted or _has_hidden_attribute(filepath) def walk_skip_hidden(top, onerror=None, followlinks=False): """ A wrapper for `os.walk` that skips hidden files and directories. This function does not have the parameter ``topdown`` from `os.walk`: the directories must always be recursed top-down when using this function. See Also -------- os.walk : For a description of the parameters """ for root, dirs, files in os.walk( top, topdown=True, onerror=onerror, followlinks=followlinks ): # These lists must be updated in-place so os.walk will skip # hidden directories dirs[:] = [d for d in dirs if not is_path_hidden(d)] files[:] = [f for f in files if not is_path_hidden(f)] yield root, dirs, files class JsonCustomEncoder(json.JSONEncoder): """Support for data types that JSON default encoder does not do. This includes: * Numpy array or number * Complex number * Set * Bytes * astropy.UnitBase * astropy.Quantity Examples -------- >>> import json >>> import numpy as np >>> from astropy.utils.misc import JsonCustomEncoder >>> json.dumps(np.arange(3), cls=JsonCustomEncoder) '[0, 1, 2]' """ def default(self, obj): import numpy as np from astropy import units as u if isinstance(obj, u.Quantity): return dict(value=obj.value, unit=obj.unit.to_string()) if isinstance(obj, (np.number, np.ndarray)): return obj.tolist() elif isinstance(obj, complex): return [obj.real, obj.imag] elif isinstance(obj, set): return list(obj) elif isinstance(obj, bytes): # pragma: py3 return obj.decode() elif isinstance(obj, (u.UnitBase, u.FunctionUnitBase)): if obj == u.dimensionless_unscaled: obj = "dimensionless_unit" else: return obj.to_string() return json.JSONEncoder.default(self, obj) def strip_accents(s): """ Remove accents from a Unicode string. 
This helps with matching "ångström" to "angstrom", for example. """ return "".join( c for c in unicodedata.normalize("NFD", s) if unicodedata.category(c) != "Mn" ) def did_you_mean(s, candidates, n=3, cutoff=0.8, fix=None): """ When a string isn't found in a set of candidates, we can be nice to provide a list of alternatives in the exception. This convenience function helps to format that part of the exception. Parameters ---------- s : str candidates : sequence of str or dict of str keys n : int The maximum number of results to include. See `difflib.get_close_matches`. cutoff : float In the range [0, 1]. Possibilities that don't score at least that similar to word are ignored. See `difflib.get_close_matches`. fix : callable A callable to modify the results after matching. It should take a single string and return a sequence of strings containing the fixed matches. Returns ------- message : str Returns the string "Did you mean X, Y, or Z?", or the empty string if no alternatives were found. """ if isinstance(s, str): s = strip_accents(s) s_lower = s.lower() # Create a mapping from the lower case name to all capitalization # variants of that name. candidates_lower = {} for candidate in candidates: candidate_lower = candidate.lower() candidates_lower.setdefault(candidate_lower, []) candidates_lower[candidate_lower].append(candidate) # The heuristic here is to first try "singularizing" the word. If # that doesn't match anything use difflib to find close matches in # original, lower and upper case. if s_lower.endswith("s") and s_lower[:-1] in candidates_lower: matches = [s_lower[:-1]] else: matches = difflib.get_close_matches( s_lower, candidates_lower, n=n, cutoff=cutoff ) if len(matches): capitalized_matches = set() for match in matches: capitalized_matches.update(candidates_lower[match]) matches = capitalized_matches if fix is not None: mapped_matches = [] for match in matches: mapped_matches.extend(fix(match)) matches = mapped_matches matches = list(set(matches)) matches = sorted(matches) if len(matches) == 1: matches = matches[0] else: matches = ", ".join(matches[:-1]) + " or " + matches[-1] return f"Did you mean {matches}?" return "" _ordered_descriptor_deprecation_message = """\ The {func} {obj_type} is deprecated and may be removed in a future version. You can replace its functionality with a combination of the __init_subclass__ and __set_name__ magic methods introduced in Python 3.6. See https://github.com/astropy/astropy/issues/11094 for recipes on how to replicate their functionality. """ @deprecated("4.3", _ordered_descriptor_deprecation_message) class OrderedDescriptor(metaclass=abc.ABCMeta): """ Base class for descriptors whose order in the class body should be preserved. Intended for use in concert with the `OrderedDescriptorContainer` metaclass. Subclasses of `OrderedDescriptor` must define a value for a class attribute called ``_class_attribute_``. This is the name of a class attribute on the *container* class for these descriptors, which will be set to an `~collections.OrderedDict` at class creation time. This `~collections.OrderedDict` will contain a mapping of all class attributes that were assigned instances of the `OrderedDescriptor` subclass, to the instances themselves. See the documentation for `OrderedDescriptorContainer` for a concrete example. Optionally, subclasses of `OrderedDescriptor` may define a value for a class attribute called ``_name_attribute_``. This should be the name of an attribute on instances of the subclass. 
When specified, during creation of a class containing these descriptors, the name attribute on each instance will be set to the name of the class attribute it was assigned to on the class. .. note:: Although this class is intended for use with *descriptors* (i.e. classes that define any of the ``__get__``, ``__set__``, or ``__delete__`` magic methods), this base class is not itself a descriptor, and technically this could be used for classes that are not descriptors too. However, use with descriptors is the original intended purpose. """ # This id increments for each OrderedDescriptor instance created, so they # are always ordered in the order they were created. Class bodies are # guaranteed to be executed from top to bottom. Not sure if this is # thread-safe though. _nextid = 1 @property @abc.abstractmethod def _class_attribute_(self): """ Subclasses should define this attribute to the name of an attribute on classes containing this subclass. That attribute will contain the mapping of all instances of that `OrderedDescriptor` subclass defined in the class body. If the same descriptor needs to be used with different classes, each with different names of this attribute, multiple subclasses will be needed. """ _name_attribute_ = None """ Subclasses may optionally define this attribute to specify the name of an attribute on instances of the class that should be filled with the instance's attribute name at class creation time. """ def __init__(self, *args, **kwargs): # The _nextid attribute is shared across all subclasses so that # different subclasses of OrderedDescriptors can be sorted correctly # between themselves self.__order = OrderedDescriptor._nextid OrderedDescriptor._nextid += 1 super().__init__() def __lt__(self, other): """ Defined for convenient sorting of `OrderedDescriptor` instances, which are defined to sort in their creation order. """ if isinstance(self, OrderedDescriptor) and isinstance(other, OrderedDescriptor): try: return self.__order < other.__order except AttributeError: raise RuntimeError( f"Could not determine ordering for {self} and {other}; at least " "one of them is not calling super().__init__ in its " "__init__." ) else: return NotImplemented @deprecated("4.3", _ordered_descriptor_deprecation_message) class OrderedDescriptorContainer(type): """ Classes should use this metaclass if they wish to use `OrderedDescriptor` attributes, which are class attributes that "remember" the order in which they were defined in the class body. Every subclass of `OrderedDescriptor` has an attribute called ``_class_attribute_``. For example, if we have .. code:: python class ExampleDecorator(OrderedDescriptor): _class_attribute_ = '_examples_' Then when a class with the `OrderedDescriptorContainer` metaclass is created, it will automatically be assigned a class attribute ``_examples_`` referencing an `~collections.OrderedDict` containing all instances of ``ExampleDecorator`` defined in the class body, mapped to by the names of the attributes they were assigned to. When subclassing a class with this metaclass, the descriptor dict (i.e. ``_examples_`` in the above example) will *not* contain descriptors inherited from the base class. That is, this only works by default with decorators explicitly defined in the class body. However, the subclass *may* define an attribute ``_inherit_decorators_`` which lists `OrderedDescriptor` classes that *should* be added from base classes. See the examples section below for an example of this. 
Examples -------- >>> from astropy.utils import OrderedDescriptor, OrderedDescriptorContainer >>> class TypedAttribute(OrderedDescriptor): ... \"\"\" ... Attributes that may only be assigned objects of a specific type, ... or subclasses thereof. For some reason we care about their order. ... \"\"\" ... ... _class_attribute_ = 'typed_attributes' ... _name_attribute_ = 'name' ... # A default name so that instances not attached to a class can ... # still be repr'd; useful for debugging ... name = '<unbound>' ... ... def __init__(self, type): ... # Make sure not to forget to call the super __init__ ... super().__init__() ... self.type = type ... ... def __get__(self, obj, objtype=None): ... if obj is None: ... return self ... if self.name in obj.__dict__: ... return obj.__dict__[self.name] ... else: ... raise AttributeError(self.name) ... ... def __set__(self, obj, value): ... if not isinstance(value, self.type): ... raise ValueError('{0}.{1} must be of type {2!r}'.format( ... obj.__class__.__name__, self.name, self.type)) ... obj.__dict__[self.name] = value ... ... def __delete__(self, obj): ... if self.name in obj.__dict__: ... del obj.__dict__[self.name] ... else: ... raise AttributeError(self.name) ... ... def __repr__(self): ... if isinstance(self.type, tuple) and len(self.type) > 1: ... typestr = '({0})'.format( ... ', '.join(t.__name__ for t in self.type)) ... else: ... typestr = self.type.__name__ ... return '<{0}(name={1}, type={2})>'.format( ... self.__class__.__name__, self.name, typestr) ... Now let's create an example class that uses this ``TypedAttribute``:: >>> class Point2D(metaclass=OrderedDescriptorContainer): ... x = TypedAttribute((float, int)) ... y = TypedAttribute((float, int)) ... ... def __init__(self, x, y): ... self.x, self.y = x, y ... >>> p1 = Point2D(1.0, 2.0) >>> p1.x 1.0 >>> p1.y 2.0 >>> p2 = Point2D('a', 'b') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError: Point2D.x must be of type (float, int>) We see that ``TypedAttribute`` works more or less as advertised, but there's nothing special about that. Let's see what `OrderedDescriptorContainer` did for us:: >>> Point2D.typed_attributes OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>), ('y', <TypedAttribute(name=y, type=(float, int))>)]) If we create a subclass, it does *not* by default add inherited descriptors to ``typed_attributes``:: >>> class Point3D(Point2D): ... z = TypedAttribute((float, int)) ... >>> Point3D.typed_attributes OrderedDict([('z', <TypedAttribute(name=z, type=(float, int))>)]) However, if we specify ``_inherit_descriptors_`` from ``Point2D`` then it will do so:: >>> class Point3D(Point2D): ... _inherit_descriptors_ = (TypedAttribute,) ... z = TypedAttribute((float, int)) ... >>> Point3D.typed_attributes OrderedDict([('x', <TypedAttribute(name=x, type=(float, int))>), ('y', <TypedAttribute(name=y, type=(float, int))>), ('z', <TypedAttribute(name=z, type=(float, int))>)]) .. note:: Hopefully it is clear from these examples that this construction also allows a class of type `OrderedDescriptorContainer` to use multiple different `OrderedDescriptor` classes simultaneously. 
""" _inherit_descriptors_ = () def __init__(cls, cls_name, bases, members): descriptors = defaultdict(list) seen = set() inherit_descriptors = () descr_bases = {} for mro_cls in cls.__mro__: for name, obj in mro_cls.__dict__.items(): if name in seen: # Checks if we've already seen an attribute of the given # name (if so it will override anything of the same name in # any base class) continue seen.add(name) if not isinstance(obj, OrderedDescriptor) or ( inherit_descriptors and not isinstance(obj, inherit_descriptors) ): # The second condition applies when checking any # subclasses, to see if we can inherit any descriptors of # the given type from subclasses (by default inheritance is # disabled unless the class has _inherit_descriptors_ # defined) continue if obj._name_attribute_ is not None: setattr(obj, obj._name_attribute_, name) # Don't just use the descriptor's class directly; instead go # through its MRO and find the class on which _class_attribute_ # is defined directly. This way subclasses of some # OrderedDescriptor *may* override _class_attribute_ and have # its own _class_attribute_, but by default all subclasses of # some OrderedDescriptor are still grouped together # TODO: It might be worth clarifying this in the docs if obj.__class__ not in descr_bases: for obj_cls_base in obj.__class__.__mro__: if "_class_attribute_" in obj_cls_base.__dict__: descr_bases[obj.__class__] = obj_cls_base descriptors[obj_cls_base].append((obj, name)) break else: # Make sure to put obj first for sorting purposes obj_cls_base = descr_bases[obj.__class__] descriptors[obj_cls_base].append((obj, name)) if not getattr(mro_cls, "_inherit_descriptors_", False): # If _inherit_descriptors_ is undefined then we don't inherit # any OrderedDescriptors from any of the base classes, and # there's no reason to continue through the MRO break else: inherit_descriptors = mro_cls._inherit_descriptors_ for descriptor_cls, instances in descriptors.items(): instances.sort() instances = OrderedDict((key, value) for value, key in instances) setattr(cls, descriptor_cls._class_attribute_, instances) super().__init__(cls_name, bases, members) LOCALE_LOCK = threading.Lock() @contextmanager def _set_locale(name): """ Context manager to temporarily set the locale to ``name``. An example is setting locale to "C" so that the C strtod() function will use "." as the decimal point to enable consistent numerical string parsing. Note that one cannot nest multiple _set_locale() context manager statements as this causes a threading lock. This code taken from https://stackoverflow.com/questions/18593661/how-do-i-strftime-a-date-object-in-a-different-locale. Parameters ---------- name : str Locale name, e.g. "C" or "fr_FR". """ name = str(name) with LOCALE_LOCK: saved = locale.setlocale(locale.LC_ALL) if saved == name: # Don't do anything if locale is already the requested locale yield else: try: locale.setlocale(locale.LC_ALL, name) yield finally: locale.setlocale(locale.LC_ALL, saved) set_locale = deprecated("4.0")(_set_locale) set_locale.__doc__ = """Deprecated version of :func:`_set_locale` above. See https://github.com/astropy/astropy/issues/9196 """ def dtype_bytes_or_chars(dtype): """ Parse the number out of a dtype.str value like '<U5' or '<f8'. See #5819 for discussion on the need for this function for getting the number of characters corresponding to a string dtype. 
Parameters ---------- dtype : numpy dtype object Input dtype Returns ------- bytes_or_chars : int or None Bits (for numeric types) or characters (for string types) """ match = re.search(r"(\d+)$", dtype.str) out = int(match.group(1)) if match else None return out def _hungry_for(option): # pragma: no cover """ Open browser loaded with ``option`` options near you. *Disclaimers: Payments not included. Astropy is not responsible for any liability from using this function.* .. note:: Accuracy depends on your browser settings. """ import webbrowser webbrowser.open(f"https://www.google.com/search?q={option}+near+me") def pizza(): # pragma: no cover """``/pizza``.""" _hungry_for("pizza") def coffee(is_adam=False, is_brigitta=False): # pragma: no cover """``/coffee``.""" if is_adam and is_brigitta: raise ValueError("There can be only one!") if is_adam: option = "fresh+third+wave+coffee" elif is_brigitta: option = "decent+espresso" else: option = "coffee" _hungry_for(option)
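

# Illustrative usage sketch for a few of the helpers above.  The values are
# arbitrary examples; the block is guarded so importing this module has no
# side effects.
if __name__ == "__main__":  # pragma: no cover
    import numpy as np

    # Suggest close matches for a misspelled name
    print(did_you_mean("lineer", ["linear", "cubic"]))  # -> Did you mean linear?

    # Reproducible random numbers inside the context manager
    with NumpyRNGContext(42):
        sample = np.random.randn(3)

    # JsonCustomEncoder lets json handle numpy arrays (and Quantity, sets, ...)
    print(json.dumps({"sample": sample}, cls=JsonCustomEncoder))

    # Number of characters (or bytes) encoded in a dtype string such as '<U5'
    print(dtype_bytes_or_chars(np.dtype("<U5")))  # -> 5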
9e05e6c2db08712e0c9654a8540d607cb8e4153f7e43028512687ac5644fc3f3
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Combine 3 images to produce a properly-scaled RGB image following Lupton et al. (2004). The three images must be aligned and have the same pixel scale and size. For details, see : https://ui.adsabs.harvard.edu/abs/2004PASP..116..133L """ import numpy as np from . import ZScaleInterval __all__ = ["make_lupton_rgb"] def compute_intensity(image_r, image_g=None, image_b=None): """ Return a naive total intensity from the red, blue, and green intensities. Parameters ---------- image_r : ndarray Intensity of image to be mapped to red; or total intensity if ``image_g`` and ``image_b`` are None. image_g : ndarray, optional Intensity of image to be mapped to green. image_b : ndarray, optional Intensity of image to be mapped to blue. Returns ------- intensity : ndarray Total intensity from the red, blue and green intensities, or ``image_r`` if green and blue images are not provided. """ if image_g is None or image_b is None: if not (image_g is None and image_b is None): raise ValueError( "please specify either a single image or red, green, and blue images." ) return image_r intensity = (image_r + image_g + image_b) / 3.0 # Repack into whatever type was passed to us return np.asarray(intensity, dtype=image_r.dtype) class Mapping: """ Baseclass to map red, blue, green intensities into uint8 values. Parameters ---------- minimum : float or sequence(3) Intensity that should be mapped to black (a scalar or array for R, G, B). image : ndarray, optional An image used to calculate some parameters of some mappings. """ def __init__(self, minimum=None, image=None): self._uint8Max = float(np.iinfo(np.uint8).max) try: len(minimum) except TypeError: minimum = 3 * [minimum] if len(minimum) != 3: raise ValueError("please provide 1 or 3 values for minimum.") self.minimum = minimum self._image = np.asarray(image) def make_rgb_image(self, image_r, image_g, image_b): """ Convert 3 arrays, image_r, image_g, and image_b into an 8-bit RGB image. Parameters ---------- image_r : ndarray Image to map to red. image_g : ndarray Image to map to green. image_b : ndarray Image to map to blue. Returns ------- RGBimage : ndarray RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array. """ image_r = np.asarray(image_r) image_g = np.asarray(image_g) image_b = np.asarray(image_b) if (image_r.shape != image_g.shape) or (image_g.shape != image_b.shape): msg = "The image shapes must match. r: {}, g: {} b: {}" raise ValueError(msg.format(image_r.shape, image_g.shape, image_b.shape)) return np.dstack( self._convert_images_to_uint8(image_r, image_g, image_b) ).astype(np.uint8) def intensity(self, image_r, image_g, image_b): """ Return the total intensity from the red, blue, and green intensities. This is a naive computation, and may be overridden by subclasses. Parameters ---------- image_r : ndarray Intensity of image to be mapped to red; or total intensity if ``image_g`` and ``image_b`` are None. image_g : ndarray, optional Intensity of image to be mapped to green. image_b : ndarray, optional Intensity of image to be mapped to blue. Returns ------- intensity : ndarray Total intensity from the red, blue and green intensities, or ``image_r`` if green and blue images are not provided. """ return compute_intensity(image_r, image_g, image_b) def map_intensity_to_uint8(self, I): """ Return an array which, when multiplied by an image, returns that image mapped to the range of a uint8, [0, 255] (but not converted to uint8). 
The intensity is assumed to have had minimum subtracted (as that can be done per-band). Parameters ---------- I : ndarray Intensity to be mapped. Returns ------- mapped_I : ndarray ``I`` mapped to uint8 """ with np.errstate(invalid="ignore", divide="ignore"): return np.clip(I, 0, self._uint8Max) def _convert_images_to_uint8(self, image_r, image_g, image_b): """ Use the mapping to convert images image_r, image_g, and image_b to a triplet of uint8 images. """ image_r = image_r - self.minimum[0] # n.b. makes copy image_g = image_g - self.minimum[1] image_b = image_b - self.minimum[2] fac = self.map_intensity_to_uint8(self.intensity(image_r, image_g, image_b)) image_rgb = [image_r, image_g, image_b] for c in image_rgb: c *= fac with np.errstate(invalid="ignore"): c[c < 0] = 0 # individual bands can still be < 0, even if fac isn't pixmax = self._uint8Max # copies -- could work row by row to minimise memory usage r0, g0, b0 = image_rgb # n.b. np.where can't and doesn't short-circuit with np.errstate(invalid="ignore", divide="ignore"): for i, c in enumerate(image_rgb): c = np.where( r0 > g0, np.where( r0 > b0, np.where(r0 >= pixmax, c * pixmax / r0, c), np.where(b0 >= pixmax, c * pixmax / b0, c), ), np.where( g0 > b0, np.where(g0 >= pixmax, c * pixmax / g0, c), np.where(b0 >= pixmax, c * pixmax / b0, c), ), ).astype(np.uint8) c[c > pixmax] = pixmax image_rgb[i] = c return image_rgb class LinearMapping(Mapping): """ A linear map map of red, blue, green intensities into uint8 values. A linear stretch from [minimum, maximum]. If one or both are omitted use image min and/or max to set them. Parameters ---------- minimum : float Intensity that should be mapped to black (a scalar or array for R, G, B). maximum : float Intensity that should be mapped to white (a scalar). """ def __init__(self, minimum=None, maximum=None, image=None): if minimum is None or maximum is None: if image is None: raise ValueError( "you must provide an image if you don't " "set both minimum and maximum" ) if minimum is None: minimum = image.min() if maximum is None: maximum = image.max() Mapping.__init__(self, minimum=minimum, image=image) self.maximum = maximum if maximum is None: self._range = None else: if maximum == minimum: raise ValueError("minimum and maximum values must not be equal") self._range = float(maximum - minimum) def map_intensity_to_uint8(self, I): # n.b. np.where can't and doesn't short-circuit with np.errstate(invalid="ignore", divide="ignore"): return np.where( I <= 0, 0, np.where( I >= self._range, self._uint8Max / I, self._uint8Max / self._range ), ) class AsinhMapping(Mapping): """ A mapping for an asinh stretch (preserving colours independent of brightness). x = asinh(Q (I - minimum)/stretch)/Q This reduces to a linear stretch if Q == 0 See https://ui.adsabs.harvard.edu/abs/2004PASP..116..133L Parameters ---------- minimum : float Intensity that should be mapped to black (a scalar or array for R, G, B). stretch : float The linear stretch of the image. Q : float The asinh softening parameter. """ def __init__(self, minimum, stretch, Q=8): Mapping.__init__(self, minimum) # 32bit floating point machine epsilon; sys.float_info.epsilon is 64bit epsilon = 1.0 / 2**23 if abs(Q) < epsilon: Q = 0.1 else: Qmax = 1e10 if Q > Qmax: Q = Qmax frac = 0.1 # gradient estimated using frac*stretch is _slope self._slope = frac * self._uint8Max / np.arcsinh(frac * Q) self._soften = Q / float(stretch) def map_intensity_to_uint8(self, I): # n.b. 
np.where can't and doesn't short-circuit with np.errstate(invalid="ignore", divide="ignore"): return np.where(I <= 0, 0, np.arcsinh(I * self._soften) * self._slope / I) class AsinhZScaleMapping(AsinhMapping): """ A mapping for an asinh stretch, estimating the linear stretch by zscale. x = asinh(Q (I - z1)/(z2 - z1))/Q Parameters ---------- image1 : ndarray or a list of arrays The image to analyse, or a list of 3 images to be converted to an intensity image. image2 : ndarray, optional the second image to analyse (must be specified with image3). image3 : ndarray, optional the third image to analyse (must be specified with image2). Q : float, optional The asinh softening parameter. Default is 8. pedestal : float or sequence(3), optional The value, or array of 3 values, to subtract from the images; or None. Notes ----- pedestal, if not None, is removed from the images when calculating the zscale stretch, and added back into Mapping.minimum[] """ def __init__(self, image1, image2=None, image3=None, Q=8, pedestal=None): if image2 is None or image3 is None: if not (image2 is None and image3 is None): raise ValueError( "please specify either a single image or three images." ) image = [image1] else: image = [image1, image2, image3] if pedestal is not None: try: len(pedestal) except TypeError: pedestal = 3 * [pedestal] if len(pedestal) != 3: raise ValueError("please provide 1 or 3 pedestals.") image = list(image) # needs to be mutable for i, im in enumerate(image): if pedestal[i] != 0.0: image[i] = im - pedestal[i] # n.b. a copy else: pedestal = len(image) * [0.0] image = compute_intensity(*image) zscale_limits = ZScaleInterval().get_limits(image) zscale = LinearMapping(*zscale_limits, image=image) # zscale.minimum is always a triple stretch = zscale.maximum - zscale.minimum[0] minimum = zscale.minimum for i, level in enumerate(pedestal): minimum[i] += level AsinhMapping.__init__(self, minimum, stretch, Q) self._image = image def make_lupton_rgb( image_r, image_g, image_b, minimum=0, stretch=5, Q=8, filename=None ): """ Return a Red/Green/Blue color image from up to 3 images using an asinh stretch. The input images can be int or float, and in any range or bit-depth. For a more detailed look at the use of this method, see the document :ref:`astropy:astropy-visualization-rgb`. Parameters ---------- image_r : ndarray Image to map to red. image_g : ndarray Image to map to green. image_b : ndarray Image to map to blue. minimum : float Intensity that should be mapped to black (a scalar or array for R, G, B). stretch : float The linear stretch of the image. Q : float The asinh softening parameter. filename : str Write the resulting RGB image to a file (file type determined from extension). Returns ------- rgb : ndarray RGB (integer, 8-bits per channel) color image as an NxNx3 numpy array. """ asinhMap = AsinhMapping(minimum, stretch, Q) rgb = asinhMap.make_rgb_image(image_r, image_g, image_b) if filename: import matplotlib.image matplotlib.image.imsave(filename, rgb, origin="lower") return rgb
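

# Illustrative usage sketch: build an 8-bit RGB image from three synthetic,
# aligned arrays.  The Gaussian noise below is arbitrary test data; writing
# the result to disk (via the ``filename`` argument) would require matplotlib.
if __name__ == "__main__":  # pragma: no cover
    rng = np.random.default_rng(0)
    shape = (64, 64)
    # Three bands with different mean levels and independent noise
    image_r = rng.normal(5.0, 1.0, shape)
    image_g = rng.normal(4.0, 1.0, shape)
    image_b = rng.normal(3.0, 1.0, shape)
    rgb = make_lupton_rgb(image_r, image_g, image_b, minimum=0, stretch=5, Q=8)
    print(rgb.shape, rgb.dtype)  # (64, 64, 3) uint8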